Posted to commits@hbase.apache.org by st...@apache.org on 2012/04/18 23:57:23 UTC

svn commit: r1327696 - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/master/AssignmentManager.java test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java

Author: stack
Date: Wed Apr 18 21:57:23 2012
New Revision: 1327696

URL: http://svn.apache.org/viewvc?rev=1327696&view=rev
Log:
HBASE-5811 TestLoadAndSwitchEncodeOnDisk fails sometimes

Modified:
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
    hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1327696&r1=1327695&r2=1327696&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Wed Apr 18 21:57:23 2012
@@ -1346,8 +1346,7 @@ public class AssignmentManager extends Z
    * @param region
    * @param setOfflineInZK
    * @param forceNewPlan
-   * @param hijack
-   *          - true new assignment is needed, false otherwise
+   * @param hijack True if new assignment is needed, false otherwise
    */
   public void assign(HRegionInfo region, boolean setOfflineInZK,
       boolean forceNewPlan, boolean hijack) {
@@ -1598,20 +1597,17 @@ public class AssignmentManager extends Z
       if (setOfflineInZK) {
         // get the version of the znode after setting it to OFFLINE.
         // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
-        versionOfOfflineNode = setOfflineInZooKeeper(state,
-            hijack);
-        if(versionOfOfflineNode != -1){
+        versionOfOfflineNode = setOfflineInZooKeeper(state, hijack);
+        if (versionOfOfflineNode != -1) {
           if (isDisabledorDisablingRegionInRIT(region)) {
             return;
           }
           setEnabledTable(region);
         }
       }
-      
       if (setOfflineInZK && versionOfOfflineNode == -1) {
         return;
       }
-      
       if (this.master.isStopped()) {
         LOG.debug("Server stopped; skipping assign of " + state);
         return;
@@ -2218,12 +2214,16 @@ public class AssignmentManager extends Z
     LOG.info("Bulk assigning done");
   }
 
+  // TODO: This method seems way wrong.  Why would we mark a table enabled based
+  // off a single region?  We seem to call this on bulk assign on startup which
+  // isn't too bad but then it's also called in assign.  It makes the enabled
+  // flag up in zk meaningless.  St.Ack
   private void setEnabledTable(HRegionInfo hri) {
     String tableName = hri.getTableNameAsString();
     boolean isTableEnabled = this.zkTable.isEnabledTable(tableName);
     if (!isTableEnabled) {
       setEnabledTable(tableName);
-    }    
+    }
   }
 
   /**

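For reference, the method whose javadoc is clarified above is public and has the
signature assign(HRegionInfo region, boolean setOfflineInZK, boolean forceNewPlan,
boolean hijack). A minimal caller sketch, assuming access to the master's
AssignmentManager; the wrapper class and method name below are hypothetical and
not part of this commit:

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.master.AssignmentManager;

  public class AssignSketch {
    // Offline the region's znode and assign it. hijack=false keeps the normal
    // path; hijack=true signals a new assignment is needed, per the clarified
    // javadoc.
    static void reassign(AssignmentManager am, HRegionInfo region) {
      am.assign(region, true /* setOfflineInZK */, false /* forceNewPlan */, false /* hijack */);
    }
  }
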
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java?rev=1327696&r1=1327695&r2=1327696&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java Wed Apr 18 21:57:23 2012
@@ -16,12 +16,21 @@
  */
 package org.apache.hadoop.hbase.io.encoding;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Map;
+import java.util.NavigableMap;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -64,8 +73,10 @@ public class TestLoadAndSwitchEncodeOnDi
     super.loadTest();
 
     HColumnDescriptor hcd = getColumnDesc(admin);
-    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
-        hcd + "\n");
+    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
+    HTable t = new HTable(this.conf, TABLE);
+    assertAllOnLine(t);
+
     admin.disableTable(TABLE);
     hcd.setEncodeOnDisk(false);
     admin.modifyColumn(TABLE, hcd);
@@ -76,6 +87,10 @@ public class TestLoadAndSwitchEncodeOnDi
     System.err.println("\nNew column descriptor: " +
         getColumnDesc(admin) + "\n");
 
+    // The table may not have all regions online yet.  Assert online before
+    // moving to major compact.
+    assertAllOnLine(t);
+
     System.err.println("\nCompacting the table\n");
     admin.majorCompact(TABLE);
     // Wait until compaction completes
@@ -88,4 +103,15 @@ public class TestLoadAndSwitchEncodeOnDi
     System.err.println("\nDone with the test, shutting down the cluster\n");
   }
 
+  private void assertAllOnLine(final HTable t) throws IOException {
+    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
+    for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
+      byte [] startkey = e.getKey().getStartKey();
+      Scan s = new Scan(startkey);
+      ResultScanner scanner = t.getScanner(s);
+      Result r = scanner.next();
+      org.junit.Assert.assertTrue(r != null && r.size() > 0);
+      scanner.close();
+    }
+  }
 }
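
The assertAllOnLine helper added above fails the test immediately if any region is
not yet serving a scan at its start key. A standalone sketch of the same check,
rewritten as a bounded wait instead of a hard assertion (the class and method names
here are hypothetical and not part of the commit), assuming the HTable/Scan client
API used in the diff:

  import java.io.IOException;
  import java.util.Map;
  import java.util.NavigableMap;

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.HTable;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;

  public class RegionOnlineCheck {
    // Returns true once every region of the table passes the check below,
    // or false if the timeout elapses first.
    static boolean waitAllOnline(HTable t, long timeoutMs) throws IOException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (allOnline(t)) {
          return true;
        }
        try {
          Thread.sleep(200);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return false;
        }
      }
      return false;
    }

    private static boolean allOnline(HTable t) throws IOException {
      NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
      for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
        // Mirrors the test's assertion: the first result of a scan started at
        // the region's start key must be non-null and non-empty.
        Scan s = new Scan(e.getKey().getStartKey());
        ResultScanner scanner = t.getScanner(s);
        try {
          Result r = scanner.next();
          if (r == null || r.size() == 0) {
            return false;
          }
        } finally {
          scanner.close();
        }
      }
      return true;
    }
  }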