You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2009/05/18 07:17:31 UTC
svn commit: r775811 - in /hadoop/hbase/branches/0.19: CHANGES.txt
src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
src/test/org/apache/hadoop/hbase/client/TestHTable.java
Author: apurtell
Date: Mon May 18 05:17:31 2009
New Revision: 775811
URL: http://svn.apache.org/viewvc?rev=775811&view=rev
Log:
HBASE-1431 NPE in HTable.checkAndSave when row doesn't exist
Modified:
hadoop/hbase/branches/0.19/CHANGES.txt
hadoop/hbase/branches/0.19/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/branches/0.19/src/test/org/apache/hadoop/hbase/client/TestHTable.java
Modified: hadoop/hbase/branches/0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.19/CHANGES.txt?rev=775811&r1=775810&r2=775811&view=diff
==============================================================================
--- hadoop/hbase/branches/0.19/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.19/CHANGES.txt Mon May 18 05:17:31 2009
@@ -7,12 +7,15 @@
it has not been pending
HBASE-1362 .META. may not come back if regionserver crashes
(Nitay Joffe via Stack)
+ HBASE-1431 NPE in HTable.checkAndSave when row doesn't exist (Guilherme
+ Mauro Germoglio Barbosa via Andrew Purtell)
IMPROVEMENTS
HBASE-1418 Transactional improvements and fixes (Clint Morgan via Stack)
HBASE-1424 have shell print regioninfo and location on first load if
DEBUG enabled
- HBASE-1008 [performance] The replay of logs on server crash takes way too long
+ HBASE-1008 [performance] The replay of logs on server crash takes way too
+ long
HBASE-1401 close HLog (and open new one) if there haven't been edits in
N minutes/hours
Modified: hadoop/hbase/branches/0.19/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.19/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=775811&r1=775810&r2=775811&view=diff
==============================================================================
--- hadoop/hbase/branches/0.19/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/branches/0.19/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Mon May 18 05:17:31 2009
@@ -1333,14 +1333,17 @@
Map<byte[],Cell> actualValues = this.getFull(row,keySet,
HConstants.LATEST_TIMESTAMP, 1,lid);
for (byte[] key : keySet) {
- // If test fails exit
- if(!Bytes.equals(actualValues.get(key).getValue(),
- expectedValues.get(key))) {
- success = false;
- break;
- }
- }
-
+ // If test fails exit
+ Cell cell = actualValues.get(key);
+ byte[] actualValue = new byte[] {};
+ if (cell != null)
+ actualValue = cell.getValue();
+ if(!Bytes.equals(actualValue,
+ expectedValues.get(key))) {
+ success = false;
+ break;
+ }
+ }
if (success) {
long commitTime = (b.getTimestamp() == LATEST_TIMESTAMP)?
System.currentTimeMillis(): b.getTimestamp();
Modified: hadoop/hbase/branches/0.19/src/test/org/apache/hadoop/hbase/client/TestHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.19/src/test/org/apache/hadoop/hbase/client/TestHTable.java?rev=775811&r1=775810&r2=775811&view=diff
==============================================================================
--- hadoop/hbase/branches/0.19/src/test/org/apache/hadoop/hbase/client/TestHTable.java (original)
+++ hadoop/hbase/branches/0.19/src/test/org/apache/hadoop/hbase/client/TestHTable.java Mon May 18 05:17:31 2009
@@ -64,12 +64,24 @@
BatchUpdate batchUpdate = new BatchUpdate(row);
BatchUpdate batchUpdate2 = new BatchUpdate(row);
BatchUpdate batchUpdate3 = new BatchUpdate(row);
+
+ // this row doesn't exist when checkAndSave is invoked
+ byte [] row1 = Bytes.toBytes("row1");
+ BatchUpdate batchUpdate4 = new BatchUpdate(row1);
+ // to be used for a checkAndSave for expected empty columns
+ BatchUpdate batchUpdate5 = new BatchUpdate(row);
+
HbaseMapWritable<byte[],byte[]> expectedValues =
new HbaseMapWritable<byte[],byte[]>();
HbaseMapWritable<byte[],byte[]> badExpectedValues =
new HbaseMapWritable<byte[],byte[]>();
-
+ HbaseMapWritable<byte[],byte[]> expectedNoValues =
+ new HbaseMapWritable<byte[],byte[]>();
+ // the columns used here must not be updated on batchupdate
+ HbaseMapWritable<byte[],byte[]> expectedNoValues1 =
+ new HbaseMapWritable<byte[],byte[]>();
+
for(int i = 0; i < 5; i++) {
// This batchupdate is our initial batch update,
// As such we also set our expected values to the same values
@@ -79,7 +91,12 @@
badExpectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i),
Bytes.toBytes(500));
-
+
+ expectedNoValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), new byte[] {});
+ // the columns used here must not be updated on batchupdate
+ expectedNoValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+i+","+i), new byte[] {});
+
+
// This is our second batchupdate that we will use to update the initial
// batchupdate
batchUpdate2.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+1));
@@ -87,6 +104,14 @@
// This final batch update is to check that our expected values (which
// are now wrong)
batchUpdate3.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+2));
+
+ // Batch update that will not happen because it is to happen with some
+ // expected values, but the row doesn't exist
+ batchUpdate4.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+
+ // Batch update will happen: the row exists, but the expected columns don't,
+ // just as the condition
+ batchUpdate5.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+3));
}
// Initialize rows
@@ -108,6 +133,58 @@
// make sure that the old expected values fail
assertFalse(table.checkAndSave(batchUpdate3, expectedValues,null));
+
+ // row doesn't exist, so the expected values don't matter
+ // (unless they are empty)
+ assertFalse(table.checkAndSave(batchUpdate4, badExpectedValues, null));
+
+ assertTrue(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+ // make sure check and save saves the data when expected values were empty and the row
+ // didn't exist
+ r = table.getRow(row1);
+ columns = batchUpdate4.getColumns();
+ for(int i = 0; i < columns.length;i++) {
+ assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate4.get(columns[i])));
+ }
+
+ // since the row isn't empty anymore, those expected (empty) values
+ // are not valid anymore, so check and save method doesn't save.
+ assertFalse(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+
+ // the row exists, but the columns don't. since the expected values are
+ // for columns without value, checkAndSave must be successful.
+ assertTrue(table.checkAndSave(batchUpdate5, expectedNoValues1, null));
+ // make sure checkAndSave saved values for batchUpdate5.
+ r = table.getRow(row);
+ columns = batchUpdate5.getColumns();
+ for(int i = 0; i < columns.length;i++) {
+ assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate5.get(columns[i])));
+ }
+
+ // since the condition wasn't changed, the following checkAndSave
+ // must also be successful.
+ assertTrue(table.checkAndSave(batchUpdate, expectedNoValues1, null));
+ // make sure checkAndSave saved values for batchUpdate1
+ r = table.getRow(row);
+ columns = batchUpdate.getColumns();
+ for(int i = 0; i < columns.length;i++) {
+ assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+ }
+
+ // one failing condition must make the following checkAndSave fail
+ // the failing condition is a column to be empty, however, it has a value.
+ HbaseMapWritable<byte[],byte[]> expectedValues1 =
+ new HbaseMapWritable<byte[],byte[]>();
+ expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+0), new byte[] {});
+ expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+"EMPTY+ROW"), new byte[] {});
+ assertFalse(table.checkAndSave(batchUpdate5, expectedValues1, null));
+
+ // assure the values on the row remain the same
+ r = table.getRow(row);
+ columns = batchUpdate.getColumns();
+ for(int i = 0; i < columns.length;i++) {
+ assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+ }
}
/**