Posted to commits@hbase.apache.org by ap...@apache.org on 2009/05/18 07:33:18 UTC

svn commit: r775816 - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserver/ src/test/org/apache/hadoop/hbase/client/

Author: apurtell
Date: Mon May 18 05:33:17 2009
New Revision: 775816

URL: http://svn.apache.org/viewvc?rev=775816&view=rev
Log:
HBASE-1431, HBASE-1401

Modified:
    hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
    hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/CHANGES.txt Mon May 18 05:33:17 2009
@@ -131,6 +131,8 @@
    HBASE-1323  hbase-1234 broke TestThriftServer; fix and reenable
    HBASE-1425  ColumnValueFilter and WhileMatchFilter fixes on trunk
                (Clint Morgan via Stack)
+   HBASE-1431  NPE in HTable.checkAndSave when row doesn't exist (Guilherme
+               Mauro Germoglio Barbosa via Andrew Purtell)
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage
@@ -254,6 +256,8 @@
                HLog#append?)
    HBASE-1429  Allow passing of a configuration object to HTablePool
    HBASE-1432  LuceneDocumentWrapper is not public
+   HBASE-1401  close HLog (and open new one) if there hasn't been edits in N
+               minutes/hours
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java Mon May 18 05:33:17 2009
@@ -592,6 +592,7 @@
           // instantiate the location
           location = new HRegionLocation(regionInfo,
               new HServerAddress(serverAddress));
+          LOG.debug(location);
           cacheLocation(tableName, location);
           return location;
         } catch (TableNotFoundException e) {

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/master/HMaster.java Mon May 18 05:33:17 2009
@@ -998,6 +998,7 @@
     System.exit(0);
   }
 
+  @SuppressWarnings("null")
   protected static void doMain(String [] args,
       Class<? extends HMaster> masterClass) {
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Mon May 18 05:33:17 2009
@@ -132,7 +132,6 @@
   private final AtomicLong logSeqNum = new AtomicLong(0);
 
   private volatile long filenum = -1;
-  private volatile long old_filenum = -1;
   
   private final AtomicInteger numEntries = new AtomicInteger(0);
 
@@ -274,6 +273,10 @@
    * @throws IOException
    */
   public byte [] rollWriter() throws FailedLogCloseException, IOException {
+    // Return early if no edits have been written to the current log.
+    if (this.writer != null && this.numEntries.get() <= 0) {
+      return null;
+    }
     byte [] regionToFlush = null;
     this.cacheFlushLock.lock();
     try {
@@ -283,9 +286,6 @@
       synchronized (updateLock) {
         // Clean up current writer.
         Path oldFile = cleanupCurrentWriter(this.filenum);
-        if (this.filenum >= 0) {
-          this.old_filenum = this.filenum;
-        }
         this.filenum = System.currentTimeMillis();
         Path newPath = computeFilename(this.filenum);
         this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath,
@@ -587,7 +587,7 @@
       }
     }
   }
-  
+
   private void requestLogRoll() {
     if (this.listener != null) {
       this.listener.logRollRequested();

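For reference, the guard added to rollWriter() above makes a roll request a
no-op while the current log has no edits, so the time-based roller (see the
LogRoller change below) does not produce a stream of empty log files. A
minimal sketch of the idea, with the body of the method elided:

    public byte [] rollWriter() throws FailedLogCloseException, IOException {
      // No edits have been appended since the last roll, so rolling would
      // only create an empty file; skip it.
      if (this.writer != null && this.numEntries.get() <= 0) {
        return null;
      }
      // ... close the current writer and open a new one under cacheFlushLock ...
    }
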
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Mon May 18 05:33:17 2009
@@ -1396,13 +1396,17 @@
         Map<byte[],Cell> actualValues = getFull(row, keySet,
           HConstants.LATEST_TIMESTAMP, 1,lid);
         for (byte[] key : keySet) {
-          // If test fails exit
-          if(!Bytes.equals(actualValues.get(key).getValue(),
-              expectedValues.get(key))) {
-            success = false;
-            break;
-          }
-        }
+          // If test fails exit
+          Cell cell = actualValues.get(key);
+          byte[] actualValue = new byte[] {};
+          if (cell != null)
+            actualValue = cell.getValue();
+          if (!Bytes.equals(actualValue,
+              expectedValues.get(key))) {
+            success = false;
+            break;
+          }
+        }
         if (success) {
           long commitTime = (b.getTimestamp() == LATEST_TIMESTAMP)?
             now: b.getTimestamp();

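The HBASE-1431 fix above amounts to treating a missing cell as an empty
value before comparing, so checkAndSave no longer throws a
NullPointerException when the row (or a column) does not exist, and an
expected empty byte[] can match it. A standalone sketch of the comparison
(the helper name is illustrative, not part of the patch):

    import java.util.Arrays;

    /** Null-safe value check: a missing cell behaves like an empty value. */
    static boolean valueMatches(final byte [] actualOrNull,
        final byte [] expected) {
      final byte [] actual = actualOrNull == null
        ? new byte [] {} : actualOrNull;
      return Arrays.equals(actual, expected);
    }
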
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java Mon May 18 05:33:17 2009
@@ -40,17 +40,32 @@
   private final ReentrantLock rollLock = new ReentrantLock();
   private final AtomicBoolean rollLog = new AtomicBoolean(false);
   private final HRegionServer server;
-  
+  private volatile long lastrolltime = System.currentTimeMillis();
+  // Period to roll log.
+  private final long rollperiod;
+
   /** @param server */
   public LogRoller(final HRegionServer server) {
     super();
     this.server = server;
+    this.rollperiod =
+      this.server.conf.getLong("hbase.regionserver.logroll.period", 3600000);
   }
 
   @Override
   public void run() {
     while (!server.isStopRequested()) {
+      long now = System.currentTimeMillis();
       if (!rollLog.get()) {
+        boolean periodic = (now - this.lastrolltime) > this.rollperiod;
+        if (periodic) {
+          // Roll period elapsed: request a roll and take the roll path below.
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("HLog roll period " + this.rollperiod + "ms elapsed");
+          }
+          rollLog.set(true);
+          continue;
+        }
         synchronized (rollLog) {
           try {
             rollLog.wait(server.threadWakeFrequency);
@@ -62,6 +77,7 @@
       }
       rollLock.lock();          // Don't interrupt us. We're working
       try {
+        this.lastrolltime = now;
         byte [] regionToFlush = server.getLog().rollWriter();
         if (regionToFlush != null) {
           scheduleFlush(regionToFlush);
@@ -90,7 +106,7 @@
     }
     LOG.info("LogRoller exiting.");
   }
-  
+
   private void scheduleFlush(final byte [] region) {
     boolean scheduled = false;
     HRegion r = this.server.getOnlineRegion(region);
@@ -114,7 +130,7 @@
       rollLog.notifyAll();
     }
   }
-  
+
   /**
    * Called by region server to wake up this thread if it sleeping.
    * It is sleeping if rollLock is not held.
@@ -127,4 +143,4 @@
       rollLock.unlock();
     }
   }
-}
+}
\ No newline at end of file

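Taken together, the LogRoller changes add a time-based trigger alongside the
existing size-based one: the thread still waits on rollLog for explicit roll
requests, but it now also forces a roll once
hbase.regionserver.logroll.period milliseconds (one hour by default) have
passed since the last roll. A self-contained sketch of that loop (class and
field names here are illustrative, not the committed code):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class PeriodicRoller extends Thread {
      private final AtomicBoolean rollRequested = new AtomicBoolean(false);
      private final long rollPeriod = 3600L * 1000L;  // logroll.period default
      private final long wakeFrequency = 10L * 1000L; // thread wake frequency
      private volatile long lastRollTime = System.currentTimeMillis();
      private volatile boolean stopRequested = false;

      @Override
      public void run() {
        while (!stopRequested) {
          final long now = System.currentTimeMillis();
          if (!rollRequested.get()) {
            if (now - lastRollTime <= rollPeriod) {
              // No explicit request and the period has not elapsed: nap
              // until a request arrives or it is time to re-check.
              synchronized (rollRequested) {
                try {
                  rollRequested.wait(wakeFrequency);
                } catch (InterruptedException e) {
                  // fall through and re-check
                }
              }
              continue;
            }
            // Period elapsed with no explicit request: roll anyway.
          }
          lastRollTime = now;
          rollRequested.set(false);
          System.out.println("rolling log"); // stand-in for HLog.rollWriter()
        }
      }

      /** Called by the log when it wants a roll (cf. logRollRequested()). */
      public void requestRoll() {
        rollRequested.set(true);
        synchronized (rollRequested) {
          rollRequested.notifyAll();
        }
      }
    }
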
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java?rev=775816&r1=775815&r2=775816&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestHTable.java Mon May 18 05:33:17 2009
@@ -235,12 +235,24 @@
     BatchUpdate batchUpdate = new BatchUpdate(row);
     BatchUpdate batchUpdate2 = new BatchUpdate(row);
     BatchUpdate batchUpdate3 = new BatchUpdate(row);
+
+    // this row doesn't exist when checkAndSave is invoked
+    byte [] row1 = Bytes.toBytes("row1");
+    BatchUpdate batchUpdate4 = new BatchUpdate(row1);
     
+    // to be used for a checkAndSave with expected empty columns
+    BatchUpdate batchUpdate5 = new BatchUpdate(row);
+
     HbaseMapWritable<byte[],byte[]> expectedValues =
       new HbaseMapWritable<byte[],byte[]>();
     HbaseMapWritable<byte[],byte[]> badExpectedValues =
       new HbaseMapWritable<byte[],byte[]>();
-    
+    HbaseMapWritable<byte[],byte[]> expectedNoValues =
+      new HbaseMapWritable<byte[],byte[]>();
+    // the columns used here must never be written by the batch updates
+    HbaseMapWritable<byte[],byte[]> expectedNoValues1 =
+      new HbaseMapWritable<byte[],byte[]>();
+
     for(int i = 0; i < 5; i++) {
       // This batchupdate is our initial batch update,
       // As such we also set our expected values to the same values
@@ -250,7 +262,12 @@
       
       badExpectedValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i),
         Bytes.toBytes(500));
-      
+
+      expectedNoValues.put(Bytes.toBytes(COLUMN_FAMILY_STR+i), new byte[] {});
+      // the columns used here must never be written by the batch updates
+      expectedNoValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+i+","+i), new byte[] {});
+
+
       // This is our second batchupdate that we will use to update the initial
       // batchupdate
       batchUpdate2.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+1));
@@ -258,6 +275,14 @@
       // This final batch update is to check that our expected values (which
       // are now wrong)
       batchUpdate3.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+2));
+
+      // Batch update that will not be applied: it is conditioned on some
+      // expected values, but the row doesn't exist
+      batchUpdate4.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i));
+
+      // Batch update that will be applied: the row exists, but the expected
+      // columns have no values, which is exactly the condition
+      batchUpdate5.put(COLUMN_FAMILY_STR+i, Bytes.toBytes(i+3));
     }
     
     // Initialize rows
@@ -279,6 +304,58 @@
     
     // make sure that the old expected values fail
     assertFalse(table.checkAndSave(batchUpdate3, expectedValues,null));
+
+    // row doesn't exist, so the expected values don't matter
+    // (unless they are empty)
+    assertFalse(table.checkAndSave(batchUpdate4, badExpectedValues, null));
+
+    assertTrue(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+    // make sure checkAndSave saves the data when the expected values were
+    // empty and the row didn't exist
+    r = table.getRow(row1);
+    columns = batchUpdate4.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate4.get(columns[i])));
+    }  
+
+    // since the row isn't empty anymore, those expected (empty) values
+    // no longer match, so checkAndSave doesn't save
+    assertFalse(table.checkAndSave(batchUpdate4, expectedNoValues, null));
+    
+    // the row exists, but the columns don't. Since the expected values are
+    // for columns without a value, checkAndSave must succeed
+    assertTrue(table.checkAndSave(batchUpdate5, expectedNoValues1, null));
+    // make sure checkAndSave saved values for batchUpdate5.
+    r = table.getRow(row);
+    columns = batchUpdate5.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate5.get(columns[i])));
+    }  
+
+    // since the condition hasn't changed, the following checkAndSave
+    // must also succeed
+    assertTrue(table.checkAndSave(batchUpdate, expectedNoValues1, null));
+    // make sure checkAndSave saved values for batchUpdate1
+    r = table.getRow(row);
+    columns = batchUpdate.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+    }
+
+    // one failing condition must make the following checkAndSave fail:
+    // a column expected to be empty actually has a value
+    HbaseMapWritable<byte[],byte[]> expectedValues1 =
+      new HbaseMapWritable<byte[],byte[]>();
+    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+0), new byte[] {});
+    expectedValues1.put(Bytes.toBytes(COLUMN_FAMILY_STR+"EMPTY+ROW"), new byte[] {});
+    assertFalse(table.checkAndSave(batchUpdate5, expectedValues1, null));
+
+    // make sure the values on the row remain the same
+    r = table.getRow(row);
+    columns = batchUpdate.getColumns();
+    for(int i = 0; i < columns.length;i++) {
+      assertTrue(Bytes.equals(r.get(columns[i]).getValue(),batchUpdate.get(columns[i])));
+    }    
   }
 
   /**
@@ -373,5 +450,5 @@
      fail("Should have thrown a TableNotFoundException instead of a " +
        e.getClass());
    }
-  }
+   }
 }
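In sum, the contract exercised by the new assertions is: checkAndSave applies
the BatchUpdate only if every column in the expected-values map matches the
current row, and an expected empty byte[] matches a column (or an entire row)
that does not exist yet. A usage sketch, with illustrative row and column
names that are not part of the test above:

    // Insert only if "colfamily1:status" has never been written for this row.
    HbaseMapWritable<byte[], byte[]> expectEmpty =
      new HbaseMapWritable<byte[], byte[]>();
    expectEmpty.put(Bytes.toBytes("colfamily1:status"), new byte[] {});
    BatchUpdate update = new BatchUpdate(Bytes.toBytes("some-row"));
    update.put("colfamily1:status", Bytes.toBytes("created"));
    boolean applied = table.checkAndSave(update, expectEmpty, null);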