Posted to commits@cassandra.apache.org by br...@apache.org on 2012/02/21 20:46:27 UTC

[3/3] git commit: Use longs for ttl expiry calculation in BRW Patch by Samarth Gahire, reviewed by brandonwilliams for CASSANDRA-3754

Use longs for ttl expiry calculation in BRW
Patch by Samarth Gahire, reviewed by brandonwilliams for CASSANDRA-3754


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/83cfa9dd
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/83cfa9dd
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/83cfa9dd

Branch: refs/heads/trunk
Commit: 83cfa9dd16d3f3bce8c60d7872d415ba58da8652
Parents: d760706
Author: Brandon Williams <br...@apache.org>
Authored: Tue Feb 21 08:31:50 2012 -0600
Committer: Brandon Williams <br...@apache.org>
Committed: Tue Feb 21 08:31:50 2012 -0600

----------------------------------------------------------------------
 .../apache/cassandra/hadoop/BulkRecordWriter.java  |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/83cfa9dd/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java b/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java
index 6f056c4..f5c4f55 100644
--- a/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java
+++ b/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java
@@ -154,7 +154,7 @@ implements org.apache.hadoop.mapred.RecordWriter<ByteBuffer,List<Mutation>>
                         if(column.ttl == 0)
                             writer.addColumn(column.name, column.value, column.timestamp);
                         else
-                            writer.addExpiringColumn(column.name, column.value, column.timestamp, column.ttl, System.currentTimeMillis() + (column.ttl * 1000));
+                            writer.addExpiringColumn(column.name, column.value, column.timestamp, column.ttl, System.currentTimeMillis() + ((long)column.ttl * 1000));
                     }
                 }
             }
@@ -167,7 +167,7 @@ implements org.apache.hadoop.mapred.RecordWriter<ByteBuffer,List<Mutation>>
                     if(mut.getColumn_or_supercolumn().column.ttl == 0)
 	                     writer.addColumn(mut.getColumn_or_supercolumn().column.name, mut.getColumn_or_supercolumn().column.value, mut.getColumn_or_supercolumn().column.timestamp);
                     else
-                        writer.addExpiringColumn(mut.getColumn_or_supercolumn().column.name, mut.getColumn_or_supercolumn().column.value, mut.getColumn_or_supercolumn().column.timestamp, mut.getColumn_or_supercolumn().column.ttl, System.currentTimeMillis() + (mut.getColumn_or_supercolumn().column.ttl * 1000));
+                        writer.addExpiringColumn(mut.getColumn_or_supercolumn().column.name, mut.getColumn_or_supercolumn().column.value, mut.getColumn_or_supercolumn().column.timestamp, mut.getColumn_or_supercolumn().column.ttl, System.currentTimeMillis() + ((long)(mut.getColumn_or_supercolumn().column.ttl) * 1000));
 	            }
             }
         }
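
----------------------------------------------------------------------
Note (not part of the commit): a minimal standalone sketch of why the (long) cast in this patch matters. With a 32-bit int ttl of roughly 25 days or more, ttl * 1000 overflows int arithmetic before it is widened to long, so the computed expiry timestamp wraps to a bogus (often negative-offset) value. Widening ttl to long first keeps the multiplication in 64-bit arithmetic. Class and variable names below are illustrative only.

    public class TtlOverflowDemo
    {
        public static void main(String[] args)
        {
            int ttl = 30 * 24 * 60 * 60; // 30 days in seconds = 2,592,000

            long now = System.currentTimeMillis();

            // Old behaviour: ttl * 1000 is evaluated as int and wraps around
            // (2,592,000,000 exceeds Integer.MAX_VALUE) before being added to 'now'.
            long brokenExpiry = now + (ttl * 1000);

            // Patched behaviour: cast to long first so the multiplication
            // happens in 64-bit arithmetic and the expiry lands 30 days ahead.
            long correctExpiry = now + ((long) ttl * 1000);

            System.out.println("broken:  " + brokenExpiry);  // earlier than 'now'
            System.out.println("correct: " + correctExpiry); // 30 days after 'now'
        }
    }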