You are viewing a plain text version of this content; the canonical link to the original message is available in the HTML version of this archive page.
Posted to commits@hbase.apache.org by st...@apache.org on 2008/07/21 17:47:36 UTC
svn commit: r678454 - in /hadoop/hbase/trunk: bin/HBase.rb
src/java/org/apache/hadoop/hbase/regionserver/HStore.java
src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
Author: stack
Date: Mon Jul 21 08:47:35 2008
New Revision: 678454
URL: http://svn.apache.org/viewvc?rev=678454&view=rev
Log:
HBASE-756 In HBase shell, the put command doesn't process the timestamp
Modified:
hadoop/hbase/trunk/bin/HBase.rb
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
Modified: hadoop/hbase/trunk/bin/HBase.rb
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/bin/HBase.rb?rev=678454&r1=678453&r2=678454&view=diff
==============================================================================
--- hadoop/hbase/trunk/bin/HBase.rb (original)
+++ hadoop/hbase/trunk/bin/HBase.rb Mon Jul 21 08:47:35 2008
@@ -251,7 +251,7 @@
now = Time.now
bu = nil
if timestamp
- bu = BatchUpdate.new(row)
+ bu = BatchUpdate.new(row, timestamp)
else
bu = BatchUpdate.new(row)
end
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=678454&r1=678453&r2=678454&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Mon Jul 21 08:47:35 2008
@@ -840,8 +840,7 @@
int timesSeen = 0;
byte [] lastRow = null;
byte [] lastColumn = null;
- // Map of a row deletes keyed by column with a list of timestamps for value
- Map<byte [], List<Long>> deletes = null;
+
while (numDone < done.length) {
// Find the reader with the smallest key. If two files have same key
// but different values -- i.e. one is delete and other is non-delete
@@ -869,14 +868,9 @@
timesSeen++;
} else {
timesSeen = 0;
- // We are on to a new row. Create a new deletes list.
- deletes = new TreeMap<byte [], List<Long>>(Bytes.BYTES_COMPARATOR);
}
- byte [] value = (vals[smallestKey] == null)?
- null: vals[smallestKey].get();
- if (!isDeleted(sk, value, false, deletes) &&
- timesSeen <= family.getMaxVersions()) {
+ if (timesSeen <= family.getMaxVersions()) {
// Keep old versions until we have maxVersions worth.
// Then just skip them.
if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=678454&r1=678453&r2=678454&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Mon Jul 21 08:47:35 2008
@@ -144,16 +144,24 @@
// Assert that the first row is still deleted.
cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
assertNull(cellValues);
- // Assert the store files do not have the first record 'aaa' keys in them.
+ // Make sure the store files do have some 'aaa' keys in them.
+ boolean containsStartRow = false;
for (MapFile.Reader reader: this.r.stores.
get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getReaders()) {
reader.reset();
HStoreKey key = new HStoreKey();
ImmutableBytesWritable val = new ImmutableBytesWritable();
while(reader.next(key, val)) {
- assertFalse(Bytes.equals(key.getRow(), STARTROW));
+ if (Bytes.equals(key.getRow(), STARTROW)) {
+ containsStartRow = true;
+ break;
+ }
+ }
+ if (containsStartRow) {
+ break;
}
}
+ assertTrue(containsStartRow);
}
private void createStoreFile(final HRegion region) throws IOException {