Posted to commits@hbase.apache.org by jm...@apache.org on 2013/02/14 13:58:21 UTC

svn commit: r1446147 [32/35] - in /hbase/branches/hbase-7290v2: ./ bin/ conf/ dev-support/ hbase-client/ hbase-common/ hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/ hbase-common/src/...

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java Thu Feb 14 12:58:12 2013
@@ -21,8 +21,10 @@ package org.apache.hadoop.hbase.regionse
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.List;
@@ -87,9 +89,9 @@ public class TestSplitTransactionOnClust
   private MiniHBaseCluster cluster = null;
   private static final int NB_SERVERS = 2;
   private static CountDownLatch latch = new CountDownLatch(1);
-  private static boolean secondSplit = false;
-  private static boolean callRollBack = false;
-  private static boolean firstSplitCompleted = false;
+  private static volatile boolean secondSplit = false;
+  private static volatile boolean callRollBack = false;
+  private static volatile boolean firstSplitCompleted = false;
 
   private static final HBaseTestingUtility TESTING_UTIL =
     new HBaseTestingUtility();
@@ -118,7 +120,86 @@ public class TestSplitTransactionOnClust
     return regions.get(0).getRegionInfo();
   }
 
-  /**
+  @Test(timeout = 20000)
+  public void testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack() throws Exception {
+    final byte[] tableName = Bytes
+        .toBytes("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack");
+    HBaseAdmin admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
+    try {
+      // Create table then get the single region for our new table.
+      HTable t = createTableAndWait(tableName, Bytes.toBytes("cf"));
+      final List<HRegion> regions = cluster.getRegions(tableName);
+      HRegionInfo hri = getAndCheckSingleTableRegion(regions);
+      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
+      final HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
+      insertData(tableName, admin, t);
+      // Turn off balancer so it doesn't cut in and mess up our placements.
+      this.admin.setBalancerRunning(false, false);
+      // Turn off the meta scanner so it doesn't remove the parent region on us.
+      cluster.getMaster().setCatalogJanitorEnabled(false);
+
+      new Thread() {
+        public void run() {
+          SplitTransaction st = null;
+          st = new MockedSplitTransaction(regions.get(0), Bytes.toBytes("row2"));
+          try {
+            st.prepare();
+            st.execute(regionServer, regionServer);
+          } catch (IOException e) {
+            // Expected: the mocked split fails here, triggering rollback.
+          }
+        }
+      }.start();
+      for (int i = 0; !callRollBack && i < 100; i++) {
+        Thread.sleep(100);
+      }
+      assertTrue("Waited too long for rollback", callRollBack);
+      SplitTransaction st = null;
+      st = new MockedSplitTransaction(regions.get(0), Bytes.toBytes("row2"));
+      try {
+        secondSplit = true;
+        st.prepare();
+        st.execute(regionServer, regionServer);
+      } catch (IOException e) {
+        LOG.debug("Rollback started :"+ e.getMessage());
+        st.rollback(regionServer, regionServer);
+      }
+      for (int i=0; !firstSplitCompleted && i<100; i++) {
+        Thread.sleep(100);
+      }
+      assertTrue("fist split did not complete", firstSplitCompleted);
+
+      RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates();
+      Map<String, RegionState> rit = regionStates.getRegionsInTransition();
+      
+      for (int i=0; rit.containsKey(hri.getTableNameAsString()) && i<100; i++) {
+        Thread.sleep(100);
+      }
+      assertFalse("region still in transition", rit.containsKey(rit.containsKey(hri.getTableNameAsString())));
+
+      List<HRegion> onlineRegions = regionServer.getOnlineRegions(tableName);
+      // Region server side split is successful.
+      assertEquals("The parent region should be splitted", 2, onlineRegions.size());
+      //Should be present in RIT
+      List<HRegionInfo> regionsOfTable = cluster.getMaster().getAssignmentManager()
+          .getRegionStates().getRegionsOfTable(tableName);
+      // Master side should also reflect the same
+      assertEquals("No of regions in master", 2, regionsOfTable.size());
+    } finally {
+      admin.setBalancerRunning(true, false);
+      secondSplit = false;
+      firstSplitCompleted = false;
+      callRollBack = false;
+      cluster.getMaster().setCatalogJanitorEnabled(true);
+      if (admin.isTableAvailable(tableName) && admin.isTableEnabled(tableName)) {
+        admin.disableTable(tableName);
+        admin.deleteTable(tableName);
+      }
+      admin.close();
+    }
+  }
+
+  /**
    * A test that intentionally has master fail the processing of the split message.
    * Tests that the regionserver split ephemeral node gets cleaned up if it
    * crashes and that after we process server shutdown, the daughters are up on
@@ -135,8 +216,7 @@ public class TestSplitTransactionOnClust
       Bytes.toBytes("ephemeral");
 
     // Create table then get the single region for our new table.
-    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-
+    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -175,12 +255,8 @@ public class TestSplitTransactionOnClust
       // Now crash the server
       cluster.abortRegionServer(tableRegionIndex);
       waitUntilRegionServerDead();
+      awaitDaughters(tableName, daughters.size());
 
-      // Wait till regions are back on line again.
-      while(cluster.getRegions(tableName).size() < daughters.size()) {
-        LOG.info("Waiting for repair to happen");
-        Thread.sleep(1000);
-      }
       // Assert daughters are online.
       regions = cluster.getRegions(tableName);
       for (HRegion r: regions) {
@@ -200,6 +276,7 @@ public class TestSplitTransactionOnClust
       SplitRegionHandler.TEST_SKIP = false;
       admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
+      t.close();
     }
   }
 
@@ -209,8 +286,7 @@ public class TestSplitTransactionOnClust
       Bytes.toBytes("testExistingZnodeBlocksSplitAndWeRollback");
 
     // Create table then get the single region for our new table.
-    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-
+    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -251,6 +327,7 @@ public class TestSplitTransactionOnClust
     } finally {
       admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
+      t.close();
     }
   }
 
@@ -267,8 +344,7 @@ public class TestSplitTransactionOnClust
     final byte [] tableName = Bytes.toBytes("testShutdownSimpleFixup");
 
     // Create table then get the single region for our new table.
-    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-
+    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -295,11 +371,7 @@ public class TestSplitTransactionOnClust
       // Now crash the server
       cluster.abortRegionServer(tableRegionIndex);
       waitUntilRegionServerDead();
-      // Wait till regions are back on line again.
-      while(cluster.getRegions(tableName).size() < daughters.size()) {
-        LOG.info("Waiting for repair to happen");
-        Thread.sleep(1000);
-      }
+      awaitDaughters(tableName, daughters.size());
       // Assert daughters are online.
       regions = cluster.getRegions(tableName);
       for (HRegion r: regions) {
@@ -308,6 +380,7 @@ public class TestSplitTransactionOnClust
     } finally {
       admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
+      t.close();
     }
   }
 
@@ -323,8 +396,7 @@ public class TestSplitTransactionOnClust
       Bytes.toBytes("testShutdownFixupWhenDaughterHasSplit");
 
     // Create table then get the single region for our new table.
-    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-
+    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -357,21 +429,18 @@ public class TestSplitTransactionOnClust
         if (r.getRegionInfo().equals(daughter)) daughterRegion = r;
       }
       assertTrue(daughterRegion != null);
-      while (true) {
+      for (int i=0; i<100; i++) {
         if (!daughterRegion.hasReferences()) break;
         Threads.sleep(100);
       }
+      assertFalse("Waiting for refereces to be compacted", daughterRegion.hasReferences());
       split(daughter, server, regionCount);
       // Get list of daughters
       daughters = cluster.getRegions(tableName);
       // Now crash the server
       cluster.abortRegionServer(tableRegionIndex);
       waitUntilRegionServerDead();
-      // Wait till regions are back on line again.
-      while(cluster.getRegions(tableName).size() < daughters.size()) {
-        LOG.info("Waiting for repair to happen");
-        Thread.sleep(1000);
-      }
+      awaitDaughters(tableName, daughters.size());
       // Assert daughters are online and ONLY the original daughters -- that
       // fixup didn't insert one during server shutdown recover.
       regions = cluster.getRegions(tableName);
@@ -382,6 +451,7 @@ public class TestSplitTransactionOnClust
     } finally {
       admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
+      t.close();
     }
   }
   
@@ -402,8 +472,7 @@ public class TestSplitTransactionOnClust
     final byte[] tableName = Bytes.toBytes("testMasterRestartWhenSplittingIsPartial");
 
     // Create table then get the single region for our new table.
-    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-
+    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -457,6 +526,7 @@ public class TestSplitTransactionOnClust
       SplitRegionHandler.TEST_SKIP = false;
       admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
+      t.close();
     }
   }
 
@@ -475,8 +545,7 @@ public class TestSplitTransactionOnClust
     final byte[] tableName = Bytes.toBytes("testMasterRestartAtRegionSplitPendingCatalogJanitor");
 
     // Create table then get the single region for our new table.
-    HTable t = TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
-
+    HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
 
@@ -508,12 +577,14 @@ public class TestSplitTransactionOnClust
       byte[] data = ZKUtil.getDataNoWatch(t.getConnection()
           .getZooKeeperWatcher(), node, stat);
       // ZKUtil.create
-      while (data != null) {
+      for (int i=0; data != null && i<60; i++) {
         Thread.sleep(1000);
         data = ZKUtil.getDataNoWatch(t.getConnection().getZooKeeperWatcher(),
             node, stat);
 
       }
+      assertNull("Waited too long for ZK node to be removed: "+node, data);
+
       MockMasterWithoutCatalogJanitor master = abortAndWaitForMaster();
 
       this.admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
@@ -528,6 +599,7 @@ public class TestSplitTransactionOnClust
       SplitRegionHandler.TEST_SKIP = false;
       this.admin.setBalancerRunning(true, false);
       cluster.getMaster().setCatalogJanitorEnabled(true);
+      t.close();
     }
   }
 
@@ -543,92 +615,12 @@ public class TestSplitTransactionOnClust
    * @throws KeeperException
    */
   @Test
-  public void testSplitBeforeSettingSplittingInZK() throws IOException,
+  public void testSplitBeforeSettingSplittingInZK() throws Exception,
       InterruptedException, KeeperException {
     testSplitBeforeSettingSplittingInZKInternals();
   }
   
   @Test(timeout = 20000)
-  public void testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack() throws Exception {
-    final byte[] tableName = Bytes
-        .toBytes("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack");
-    HBaseAdmin admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
-    try {
-      // Create table then get the single region for our new table.
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor("cf"));
-      admin.createTable(htd);
-      HTable t = new HTable(cluster.getConfiguration(), tableName);
-      while (!(cluster.getRegions(tableName).size() == 1)) {
-        Thread.sleep(100);
-      }
-      final List<HRegion> regions = cluster.getRegions(tableName);
-      HRegionInfo hri = getAndCheckSingleTableRegion(regions);
-      int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
-      final HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
-      insertData(tableName, admin, t);
-      // Turn off balancer so it doesn't cut in and mess up our placements.
-      this.admin.setBalancerRunning(false, false);
-      // Turn off the meta scanner so it don't remove parent on us.
-      cluster.getMaster().setCatalogJanitorEnabled(false);
-
-      new Thread() {
-        public void run() {
-          SplitTransaction st = null;
-          st = new MockedSplitTransaction(regions.get(0), Bytes.toBytes("row2"));
-          try {
-            st.prepare();
-            st.execute(regionServer, regionServer);
-          } catch (IOException e) {
-
-          }
-        }
-      }.start();
-      while (!callRollBack) {
-        Thread.sleep(100);
-      }
-      SplitTransaction st = null;
-      st = new MockedSplitTransaction(regions.get(0), Bytes.toBytes("row2"));
-      try {
-        secondSplit = true;
-        st.prepare();
-        st.execute(regionServer, regionServer);
-      } catch (IOException e) {
-        LOG.debug("Rollback started :"+ e.getMessage());
-        st.rollback(regionServer, regionServer);
-      }
-      while (!firstSplitCompleted) {
-        Thread.sleep(100);
-      }
-      RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates();
-      Map<String, RegionState> rit = regionStates.getRegionsInTransition();
-      
-      while (rit.containsKey(hri.getTableNameAsString())) {
-        Thread.sleep(100);
-      }
-      List<HRegion> onlineRegions = regionServer.getOnlineRegions(tableName);
-      // Region server side split is successful.
-      assertEquals("The parent region should be splitted", 2, onlineRegions.size());
-      //Should be present in RIT
-      List<HRegionInfo> regionsOfTable = cluster.getMaster().getAssignmentManager()
-          .getRegionStates().getRegionsOfTable(tableName);
-      // Master side should also reflect the same
-      assertEquals("No of regions in master", 2, regionsOfTable.size());
-    } finally {
-      admin.setBalancerRunning(true, false);
-      secondSplit = false;
-      firstSplitCompleted = false;
-      callRollBack = false;
-      cluster.getMaster().setCatalogJanitorEnabled(true);
-      if (admin.isTableAvailable(tableName) && admin.isTableEnabled(tableName)) {
-        admin.disableTable(tableName);
-        admin.deleteTable(tableName);
-        admin.close();
-      }
-    }
-  }
-
-  @Test(timeout = 20000)
   public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception {
     final byte[] tableName = 
         Bytes.toBytes("testTableExistsIfTheSpecifiedTableRegionIsSplitParent");
@@ -637,10 +629,7 @@ public class TestSplitTransactionOnClust
     HBaseAdmin admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
     try {
       // Create table then get the single region for our new table.
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor("cf"));
-      admin.createTable(htd);
-      HTable t = new HTable(cluster.getConfiguration(), tableName);
+      HTable t = createTableAndWait(tableName, Bytes.toBytes("cf"));
       regions = cluster.getRegions(tableName);
       int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
       regionServer = cluster.getRegionServer(regionServerIndex);
@@ -692,7 +681,7 @@ public class TestSplitTransactionOnClust
       throws Exception {
     final byte[] tableName = Bytes.toBytes("testRollBackShudBeSuccessfulIfStoreFileIsEmpty");
     // Create table then get the single region for our new table.
-    TESTING_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
+    createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
     List<HRegion> regions = cluster.getRegions(tableName);
     HRegionInfo hri = getAndCheckSingleTableRegion(regions);
     int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
@@ -727,8 +716,7 @@ public class TestSplitTransactionOnClust
 
   }
 
-  private void testSplitBeforeSettingSplittingInZKInternals() throws IOException,
-      KeeperException {
+  private void testSplitBeforeSettingSplittingInZKInternals() throws Exception {
     final byte[] tableName = Bytes.toBytes("testSplitBeforeSettingSplittingInZK");
     HBaseAdmin admin = TESTING_UTIL.getHBaseAdmin();
     try {
@@ -736,8 +724,11 @@ public class TestSplitTransactionOnClust
       HTableDescriptor htd = new HTableDescriptor(tableName);
       htd.addFamily(new HColumnDescriptor("cf"));
       admin.createTable(htd);
-
-      List<HRegion> regions = cluster.getRegions(tableName);
+      for (int i = 0; cluster.getRegions(tableName).size() == 0 && i < 100; i++) {
+        Thread.sleep(100);
+      }
+      assertTrue("Table not online", cluster.getRegions(tableName).size() != 0);
+      List<HRegion> regions = cluster.getRegions(tableName);
       int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
       SplitTransaction st = null;
@@ -841,10 +838,12 @@ public class TestSplitTransactionOnClust
       final int regionCount)
   throws IOException, InterruptedException {
     this.admin.split(hri.getRegionNameAsString());
-    while (ProtobufUtil.getOnlineRegions(server).size() <= regionCount) {
+    for (int i = 0; ProtobufUtil.getOnlineRegions(server).size() <= regionCount && i < 100; i++) {
       LOG.debug("Waiting on region to split");
       Thread.sleep(100);
     }
+    assertFalse("Waited too long for split",
+        ProtobufUtil.getOnlineRegions(server).size() <= regionCount);
   }
 
   private void removeDaughterFromMeta(final byte [] regionName) throws IOException {
@@ -891,13 +890,15 @@ public class TestSplitTransactionOnClust
         Bytes.toBytes(hrs.getServerName().toString()));
     }
     // Wait till table region is up on the server that is NOT carrying .META..
-    while (true) {
+    for (int i=0; i<100; i++) {
       tableRegionIndex = cluster.getServerWith(hri.getRegionName());
       if (tableRegionIndex != -1 && tableRegionIndex != metaServerIndex) break;
       LOG.debug("Waiting on region move off the .META. server; current index " +
         tableRegionIndex + " and metaServerIndex=" + metaServerIndex);
       Thread.sleep(100);
     }
+    assertTrue("Region not moved off .META. server", tableRegionIndex != -1
+        && tableRegionIndex != metaServerIndex);
     // Verify for sure table region is not on same server as .META.
     tableRegionIndex = cluster.getServerWith(hri.getRegionName());
     assertTrue(tableRegionIndex != -1);
@@ -935,13 +936,37 @@ public class TestSplitTransactionOnClust
 
   private void waitUntilRegionServerDead() throws InterruptedException {
     // Wait until the master processes the RS shutdown
-    while (cluster.getMaster().getClusterStatus().
-        getServers().size() == NB_SERVERS) {
+    for (int i=0; cluster.getMaster().getClusterStatus().
+        getServers().size() == NB_SERVERS && i<100; i++) {
       LOG.info("Waiting on server to go down");
       Thread.sleep(100);
     }
+    assertFalse("Waited too long for RS to die", cluster.getMaster().getClusterStatus().
+        getServers().size() == NB_SERVERS);
+  }
+
+  private void awaitDaughters(byte[] tableName, int numDaughters) throws InterruptedException {
+    // Wait till regions are back on line again.
+    for (int i=0; cluster.getRegions(tableName).size() < numDaughters && i<60; i++) {
+      LOG.info("Waiting for repair to happen");
+      Thread.sleep(1000);
+    }
+    if (cluster.getRegions(tableName).size() < numDaughters) {
+      fail("Waiting too long for daughter regions");
+    }
   }
   
+  private HTable createTableAndWait(byte[] tableName, byte[] cf) throws IOException,
+      InterruptedException {
+    HTable t = TESTING_UTIL.createTable(tableName, cf);
+    for (int i = 0; cluster.getRegions(tableName).size() == 0 && i < 100; i++) {
+      Thread.sleep(100);
+    }
+    assertTrue("Table not online: " + Bytes.toString(tableName), cluster.getRegions(tableName)
+        .size() != 0);
+    return t;
+  }
+
   public static class MockMasterWithoutCatalogJanitor extends HMaster {
 
     public MockMasterWithoutCatalogJanitor(Configuration conf) throws IOException, KeeperException,

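A note on the pattern above: this file's changes repeatedly convert unbounded "while (...) sleep()" waits into bounded polls that fail with a message instead of hanging the build until the harness timeout. A minimal sketch of the same idea as a reusable helper; the name waitFor, its signature, and the usage names (cluster, tableName, numDaughters) are illustrative, not part of this commit:

    import java.util.concurrent.Callable;
    import static org.junit.Assert.fail;

    /**
     * Polls a condition at a fixed interval and fails loudly on timeout,
     * so a wedged cluster fails the test rather than hanging it.
     */
    private static void waitFor(String what, long timeoutMs, long intervalMs,
        Callable<Boolean> condition) throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (condition.call()) {
          return;
        }
        Thread.sleep(intervalMs);
      }
      fail("Waited too long for: " + what);
    }

    // Pre-Java-8 usage, mirroring awaitDaughters() above:
    waitFor("daughter regions online", 60000, 1000, new Callable<Boolean>() {
      public Boolean call() {
        return cluster.getRegions(tableName).size() >= numDaughters;
      }
    });
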
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Thu Feb 14 12:58:12 2013
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
@@ -60,9 +59,11 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.util.Progressable;
 import org.junit.experimental.categories.Category;
@@ -133,9 +134,15 @@ public class TestStore extends TestCase 
     hcd.setMaxVersions(4);
     init(methodName, conf, hcd);
   }
-  
+
   private void init(String methodName, Configuration conf,
       HColumnDescriptor hcd) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(table);
+    init(methodName, conf, htd, hcd);
+  }
+
+  private void init(String methodName, Configuration conf, HTableDescriptor htd,
+      HColumnDescriptor hcd) throws IOException {
     //Setting up a Store
     Path basedir = new Path(DIR+methodName);
     String logName = "logs";
@@ -145,7 +152,6 @@ public class TestStore extends TestCase 
 
     fs.delete(logdir, true);
 
-    HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
@@ -186,6 +192,8 @@ public class TestStore extends TestCase 
   public void testDeleteExpiredStoreFiles() throws Exception {
     int storeFileNum = 4;
     int ttl = 4;
+    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
+    EnvironmentEdgeManagerTestHelper.injectEdge(edge);
     
     Configuration conf = HBaseConfiguration.create();
     // Enable the expired store file deletion
@@ -194,7 +202,7 @@ public class TestStore extends TestCase 
     hcd.setTimeToLive(ttl);
     init(getName(), conf, hcd);
 
-    long sleepTime = this.store.scanInfo.getTtl() / storeFileNum;
+    long sleepTime = this.store.getScanInfo().getTtl() / storeFileNum;
     long timeStamp;
     // There are 4 store files and the max time stamp difference among these
     // store files will be (this.store.ttl / storeFileNum)
@@ -205,7 +213,7 @@ public class TestStore extends TestCase 
       this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null));
       this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null));
       flush(i);
-      Thread.sleep(sleepTime);
+      edge.incrementTime(sleepTime);
     }
 
     // Verify the total number of store files
@@ -221,15 +229,16 @@ public class TestStore extends TestCase 
       assertEquals(Math.min(i, 2), cr.getFiles().size());
       for (int j = 0; j < cr.getFiles().size(); j++) {
         assertTrue(cr.getFiles().get(j).getReader().getMaxTimestamp() < (System
-            .currentTimeMillis() - this.store.scanInfo.getTtl()));
+            .currentTimeMillis() - this.store.getScanInfo().getTtl()));
       }
       // Verify that the expired store file is compacted to an empty store file.
-      StoreFile compactedFile = this.store.compact(cr);
+      // Default compaction policy creates just one and only one compacted file.
+      StoreFile compactedFile = this.store.compact(cr).get(0);
       // It is an empty store file.
       assertEquals(0, compactedFile.getReader().getEntries());
 
       // Let the next store file expired.
-      Thread.sleep(sleepTime);
+      edge.incrementTime(sleepTime);
     }
   }
 
@@ -814,5 +823,34 @@ public class TestStore extends TestCase 
     store.getHRegion().clearSplit_TESTS_ONLY();
   }
 
+  public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
+    final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
+    long anyValue = 10;
+
+    // We'll check that it uses the correct config and propagates it appropriately by going
+    // through the simplest "real" path I can find: "throttleCompaction", which just checks
+    // whether a number we pass in is higher than some config value, inside compactionPolicy.
+    Configuration conf = HBaseConfiguration.create();
+    conf.setLong(CONFIG_KEY, anyValue);
+    init(getName() + "-xml", conf);
+    assertTrue(store.throttleCompaction(anyValue + 1));
+    assertFalse(store.throttleCompaction(anyValue));
+
+    // HTD overrides XML.
+    --anyValue;
+    HTableDescriptor htd = new HTableDescriptor(table);
+    HColumnDescriptor hcd = new HColumnDescriptor(family);
+    htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
+    init(getName() + "-htd", conf, htd, hcd);
+    assertTrue(store.throttleCompaction(anyValue + 1));
+    assertFalse(store.throttleCompaction(anyValue));
+
+    // HCD overrides them both.
+    --anyValue;
+    hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
+    init(getName() + "-hcd", conf, htd, hcd);
+    assertTrue(store.throttleCompaction(anyValue + 1));
+    assertFalse(store.throttleCompaction(anyValue));
+  }
 }
 

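The TestStore hunks above replace Thread.sleep(sleepTime) with edge.incrementTime(sleepTime) through an injected IncrementingEnvironmentEdge, so TTL expiry is driven by a fake clock instead of wall-clock waiting. A sketch of the injectable-clock idea under illustrative names (Clock, ManualClock), not the actual EnvironmentEdge API:

    /** Time-source indirection: code under test asks the clock, never System directly. */
    interface Clock {
      long currentTimeMillis();
    }

    /** Test clock that advances only when told to, replacing real sleeps. */
    final class ManualClock implements Clock {
      private long now = 0L;
      public long currentTimeMillis() { return now; }
      public void advance(long millis) { now += millis; }
    }

With this indirection the four-store-file TTL test runs in milliseconds and stops being sensitive to scheduler jitter, since expiry becomes a pure function of the injected time.
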
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Thu Feb 14 12:58:12 2013
@@ -27,12 +27,10 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeSet;
-import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
@@ -42,8 +40,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.HalfStoreFileReader;
-import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -53,7 +49,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
-import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
@@ -103,7 +98,7 @@ public class TestStoreFile extends HBase
             .build();
     writeStoreFile(writer);
     checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
   }
 
   private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
@@ -150,7 +145,7 @@ public class TestStoreFile extends HBase
             .build();
     writeStoreFile(writer);
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     StoreFile.Reader reader = hsf.createReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
@@ -162,7 +157,7 @@ public class TestStoreFile extends HBase
     // Make a reference
     Path refPath = StoreFile.split(fs, storedir, hsf, midRow, true);
     StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.createReader().getScanner(false, false);
@@ -202,8 +197,8 @@ public class TestStoreFile extends HBase
                   HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
 
     // Try to open store file from link
-    StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+    StoreFile hsf = new StoreFile(this.fs, linkFilePath, conf, cacheConf,
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     assertTrue(hsf.isLink());
 
     // Now confirm that I can read from the link
@@ -292,7 +287,7 @@ public class TestStoreFile extends HBase
     
     // Try to open store file from link
     StoreFile hsfA = new StoreFile(this.fs, pathA,  conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
 
     // Now confirm that I can read from the ref to link
     int count = 1;
@@ -305,7 +300,7 @@ public class TestStoreFile extends HBase
     
     // Try to open store file from link
     StoreFile hsfB = new StoreFile(this.fs, pathB,  conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
 
     // Now confirm that I can read from the ref to link
     HFileScanner sB = hsfB.createReader().getScanner(false, false);
@@ -398,10 +393,10 @@ public class TestStoreFile extends HBase
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
       top = new StoreFile(this.fs, topPath, conf, cacheConf,
-          StoreFile.BloomType.NONE,
+          BloomType.NONE,
           NoOpDataBlockEncoder.INSTANCE).createReader();
       bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
-          StoreFile.BloomType.NONE,
+          BloomType.NONE,
           NoOpDataBlockEncoder.INSTANCE).createReader();
       bottomScanner = bottom.getScanner(false, false);
       int count = 0;
@@ -444,10 +439,10 @@ public class TestStoreFile extends HBase
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
       top = new StoreFile(this.fs, topPath, conf, cacheConf,
-          StoreFile.BloomType.NONE,
+          BloomType.NONE,
           NoOpDataBlockEncoder.INSTANCE).createReader();
       bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
-          StoreFile.BloomType.NONE,
+          BloomType.NONE,
           NoOpDataBlockEncoder.INSTANCE).createReader();
       first = true;
       bottomScanner = bottom.getScanner(false, false);
@@ -547,7 +542,7 @@ public class TestStoreFile extends HBase
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
         StoreFile.DEFAULT_BLOCKSIZE_SMALL)
             .withFilePath(f)
-            .withBloomType(StoreFile.BloomType.ROW)
+            .withBloomType(BloomType.ROW)
             .withMaxKeyCount(2000)
             .withChecksumType(CKTYPE)
             .withBytesPerChecksum(CKBYTES)
@@ -624,8 +619,8 @@ public class TestStoreFile extends HBase
     int versions = 2;
 
     // run once using columns and once using rows
-    StoreFile.BloomType[] bt =
-      {StoreFile.BloomType.ROWCOL, StoreFile.BloomType.ROW};
+    BloomType[] bt =
+      {BloomType.ROWCOL, BloomType.ROW};
     int[] expKeys    = {rowCount*colCount, rowCount};
     // below line deserves commentary.  it is expected bloom false positives
     //  column = rowCount*2*colCount inserts
@@ -683,7 +678,7 @@ public class TestStoreFile extends HBase
               scanner.shouldUseScanner(scan, columns, Long.MIN_VALUE);
           boolean shouldRowExist = i % 2 == 0;
           boolean shouldColExist = j % 2 == 0;
-          shouldColExist = shouldColExist || bt[x] == StoreFile.BloomType.ROW;
+          shouldColExist = shouldColExist || bt[x] == BloomType.ROW;
           if (shouldRowExist && shouldColExist) {
             if (!exists) falseNeg++;
           } else {
@@ -701,60 +696,6 @@ public class TestStoreFile extends HBase
     }
   }
 
-  public void testBloomEdgeCases() throws Exception {
-    float err = (float)0.005;
-    FileSystem fs = FileSystem.getLocal(conf);
-    Path f = new Path(ROOT_DIR, getName());
-    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, err);
-    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
-    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS, 1000);
-
-    // This test only runs for HFile format version 1.
-    conf.setInt(HFile.FORMAT_VERSION_KEY, 1);
-
-    // this should not create a bloom because the max keys is too small
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
-            .withFilePath(f)
-            .withBloomType(StoreFile.BloomType.ROW)
-            .withMaxKeyCount(2000)
-            .withChecksumType(CKTYPE)
-            .withBytesPerChecksum(CKBYTES)
-            .build();
-    assertFalse(writer.hasGeneralBloom());
-    writer.close();
-    fs.delete(f, true);
-
-    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_MAX_KEYS,
-        Integer.MAX_VALUE);
-
-    // TODO: commented out because we run out of java heap space on trunk
-    // the below config caused IllegalArgumentException in our production cluster
-    // however, the resulting byteSize is < MAX_INT, so this should work properly
-    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
-            .withFilePath(f)
-            .withBloomType(StoreFile.BloomType.ROW)
-            .withMaxKeyCount(27244696)
-            .build();
-    assertTrue(writer.hasGeneralBloom());
-    bloomWriteRead(writer, fs);
-
-    // this, however, is too large and should not create a bloom
-    // because Java can't create a contiguous array > MAX_INT
-    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
-            .withFilePath(f)
-            .withBloomType(StoreFile.BloomType.ROW)
-            .withMaxKeyCount(Integer.MAX_VALUE)
-            .withChecksumType(CKTYPE)
-            .withBytesPerChecksum(CKBYTES)
-            .build();
-    assertFalse(writer.hasGeneralBloom());
-    writer.close();
-    fs.delete(f, true);
-  }
-
   public void testSeqIdComparator() {
     assertOrdering(StoreFile.Comparators.SEQ_ID,
         mockStoreFile(true, 1000, -1, "/foo/123"),
@@ -849,7 +790,7 @@ public class TestStoreFile extends HBase
     writer.close();
 
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     StoreFile.Reader reader = hsf.createReader();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
     TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
@@ -892,7 +833,7 @@ public class TestStoreFile extends HBase
     Path pathCowOff = new Path(baseDir, "123456789");
     StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     LOG.debug(hsf.getPath().toString());
 
     // Read this file, we should see 3 misses
@@ -914,7 +855,7 @@ public class TestStoreFile extends HBase
     Path pathCowOn = new Path(baseDir, "123456788");
     writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
     hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
 
     // Read this file, we should see 3 hits
     reader = hsf.createReader();
@@ -930,13 +871,13 @@ public class TestStoreFile extends HBase
 
     // Let's read back the two files to ensure the blocks exactly match
     hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     StoreFile.Reader readerOne = hsf.createReader();
     readerOne.loadFileInfo();
     StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
     scannerOne.seek(KeyValue.LOWESTKEY);
     hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     StoreFile.Reader readerTwo = hsf.createReader();
     readerTwo.loadFileInfo();
     StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
@@ -967,7 +908,7 @@ public class TestStoreFile extends HBase
     conf.setBoolean("hbase.rs.evictblocksonclose", true);
     cacheConf = new CacheConfig(conf);
     hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     reader = hsf.createReader();
     reader.close(cacheConf.shouldEvictOnClose());
 
@@ -981,7 +922,7 @@ public class TestStoreFile extends HBase
     conf.setBoolean("hbase.rs.evictblocksonclose", false);
     cacheConf = new CacheConfig(conf);
     hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
-        StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     reader = hsf.createReader();
     reader.close(cacheConf.shouldEvictOnClose());
 

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java Thu Feb 14 12:58:12 2013
@@ -88,7 +88,7 @@ public class TestOpenRegionHandler {
   @Test public void testYankingRegionFromUnderIt()
   throws IOException, NodeExistsException, KeeperException {
     final Server server = new MockServer(HTU);
-    final RegionServerServices rss = new MockRegionServerServices();
+    final RegionServerServices rss = new MockRegionServerServices(HTU.getZooKeeperWatcher());
 
     HTableDescriptor htd = TEST_HTD;
     final HRegionInfo hri = TEST_HRI;

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java Thu Feb 14 12:58:12 2013
@@ -49,6 +49,9 @@ public class FaultySequenceFileLogReader
         HLogKey key = HLogUtil.newKey(conf);
         WALEdit val = new WALEdit();
         HLog.Entry e = new HLog.Entry(key, val);
+        if (compressionContext != null) {
+          e.setCompressionContext(compressionContext);
+        }
         b = this.reader.next(e.getKey(), e.getEdit());
         nextQueue.offer(e);
         numberOfFileEntries++;

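The guard added above is about ordering: an HLog.Entry must carry the reader's compression context before its key and edit are deserialized, or dictionary-compressed cells cannot be expanded. The call sequence, restated from the hunk with comments:

    HLog.Entry e = new HLog.Entry(key, val);
    if (compressionContext != null) {
      // Attach before reader.next() populates the entry, so decompression
      // uses the dictionaries built up while reading this log.
      e.setCompressionContext(compressionContext);
    }
    b = this.reader.next(e.getKey(), e.getEdit());
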
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java Thu Feb 14 12:58:12 2013
@@ -323,11 +323,11 @@ public class TestHLog  {
       regionsToSeqids.put(l.toString().getBytes(), l);
     }
     byte [][] regions =
-      HLogUtil.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
+      FSHLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
     assertEquals(2, regions.length);
     assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
         Bytes.equals(regions[0], "1".getBytes()));
-    regions = HLogUtil.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
+    regions = FSHLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
     int count = 4;
     assertEquals(count, regions.length);
     // Regions returned are not ordered.
@@ -477,15 +477,13 @@ public class TestHLog  {
       throw t.exception;
 
     // Make sure you can read all the content
-    SequenceFile.Reader reader
-      = new SequenceFile.Reader(this.fs, walPath, this.conf);
+    HLog.Reader reader = HLogFactory.createReader(this.fs, walPath, this.conf);
     int count = 0;
-    HLogKey key = HLogUtil.newKey(conf);
-    WALEdit val = new WALEdit();
-    while (reader.next(key, val)) {
+    HLog.Entry entry = new HLog.Entry();
+    while (reader.next(entry) != null) {
       count++;
       assertTrue("Should be one KeyValue per WALEdit",
-                 val.getKeyValues().size() == 1);
+                  entry.getEdit().getKeyValues().size() == 1);
     }
     assertEquals(total, count);
     reader.close();
@@ -520,9 +518,8 @@ public class TestHLog  {
       htd.addFamily(new HColumnDescriptor("column"));
 
       log.append(info, tableName, cols, System.currentTimeMillis(), htd);
-      long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
-      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
-          info.isMetaRegion());
+      log.startCacheFlush(info.getEncodedNameAsBytes());
+      log.completeCacheFlush(info.getEncodedNameAsBytes());
       log.close();
       Path filename = ((FSHLog) log).computeFilename();
       log = null;
@@ -542,20 +539,6 @@ public class TestHLog  {
         assertEquals((byte)(i + '0'), kv.getValue()[0]);
         System.out.println(key + " " + val);
       }
-      HLog.Entry entry = null;
-      while ((entry = reader.next(null)) != null) {
-        HLogKey key = entry.getKey();
-        WALEdit val = entry.getEdit();
-        // Assert only one more row... the meta flushed row.
-        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
-        assertTrue(Bytes.equals(tableName, key.getTablename()));
-        KeyValue kv = val.getKeyValues().get(0);
-        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
-        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
-        assertEquals(0, Bytes.compareTo(HLogUtil.COMPLETE_CACHE_FLUSH,
-          val.getKeyValues().get(0).getValue()));
-        System.out.println(key + " " + val);
-      }
     } finally {
       if (log != null) {
         log.closeAndDelete();
@@ -591,8 +574,8 @@ public class TestHLog  {
       HTableDescriptor htd = new HTableDescriptor();
       htd.addFamily(new HColumnDescriptor("column"));
       log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
-      long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
-      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
+      log.startCacheFlush(hri.getEncodedNameAsBytes());
+      log.completeCacheFlush(hri.getEncodedNameAsBytes());
       log.close();
       Path filename = ((FSHLog) log).computeFilename();
       log = null;
@@ -610,20 +593,6 @@ public class TestHLog  {
         System.out.println(entry.getKey() + " " + val);
         idx++;
       }
-
-      // Get next row... the meta flushed row.
-      entry = reader.next();
-      assertEquals(1, entry.getEdit().size());
-      for (KeyValue val : entry.getEdit().getKeyValues()) {
-        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
-          entry.getKey().getEncodedRegionName()));
-        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
-        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
-        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
-        assertEquals(0, Bytes.compareTo(HLogUtil.COMPLETE_CACHE_FLUSH,
-          val.getValue()));
-        System.out.println(entry.getKey() + " " + val);
-      }
     } finally {
       if (log != null) {
         log.closeAndDelete();
@@ -707,17 +676,19 @@ public class TestHLog  {
       assertEquals(3, ((FSHLog) log).getNumLogFiles());
 
       // Flush the first region, we expect to see the first two files getting
-      // archived
-      long seqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
-      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
+      // archived. We need to append something or writer won't be rolled.
+      addEdits(log, hri2, tableName2, 1);
+      log.startCacheFlush(hri.getEncodedNameAsBytes());
+      log.completeCacheFlush(hri.getEncodedNameAsBytes());
       log.rollWriter();
       assertEquals(2, ((FSHLog) log).getNumLogFiles());
 
       // Flush the second region, which removes all the remaining output files
       // since the oldest was completely flushed and the two others only contain
       // flush information
-      seqId = log.startCacheFlush(hri2.getEncodedNameAsBytes());
-      log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
+      addEdits(log, hri2, tableName2, 1);
+      log.startCacheFlush(hri2.getEncodedNameAsBytes());
+      log.completeCacheFlush(hri2.getEncodedNameAsBytes());
       log.rollWriter();
       assertEquals(0, ((FSHLog) log).getNumLogFiles());
     } finally {

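The TestHLog hunks track an FSHLog API change: startCacheFlush() no longer returns a sequence id the caller must thread through, and completeCacheFlush() drops the table name, sequence id, and meta flag. The flush bracket the updated tests exercise reduces to the following, with the memstore flush itself elided:

    // Mark the flush in the WAL by encoded region name only.
    log.startCacheFlush(hri.getEncodedNameAsBytes());
    // ... flush the memstore to a store file ...
    log.completeCacheFlush(hri.getEncodedNameAsBytes());

The deleted assertions on HLog.METAROW and COMPLETE_CACHE_FLUSH rows appear to follow from the same change: the WAL no longer appends a marker edit when a flush completes, so there is no extra row for the tests to verify.
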
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java Thu Feb 14 12:58:12 2013
@@ -100,7 +100,7 @@ public class TestHLogSplit {
   private Configuration conf;
   private FileSystem fs;
 
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
 
   private static final Path HBASEDIR = new Path("/hbase");

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java Thu Feb 14 12:58:12 2013
@@ -247,12 +247,16 @@ public class TestLogRolling  {
       put.add(HConstants.CATALOG_FAMILY, null, value);
       table.put(put);
     }
+    Put tmpPut = new Put(Bytes.toBytes("tmprow"));
+    tmpPut.add(HConstants.CATALOG_FAMILY, null, value);
     long startTime = System.currentTimeMillis();
     long remaining = timeout;
     while (remaining > 0) {
       if (log.isLowReplicationRollEnabled() == expect) {
         break;
       } else {
+        // Trigger a call to FSHLog#checkLowReplication()
+        table.put(tmpPut);
         try {
           Thread.sleep(200);
         } catch (InterruptedException e) {
@@ -371,7 +375,8 @@ public class TestLogRolling  {
     assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null);
 
     batchWriteAndWait(table, 3, false, 10000);
-    assertTrue("LowReplication Roller should've been disabled",
+    assertTrue("LowReplication Roller should've been disabled, current replication="
+            + ((FSHLog) log).getLogReplication(),
         !log.isLowReplicationRollEnabled());
 
     dfsCluster

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java Thu Feb 14 12:58:12 2013
@@ -117,7 +117,7 @@ public class TestWALActionsListener {
     assertEquals(11, observer.postLogRollCounter);
     assertEquals(5, laterobserver.preLogRollCounter);
     assertEquals(5, laterobserver.postLogRollCounter);
-    assertEquals(2, observer.closedCount);
+    assertEquals(1, observer.closedCount);
   }
 
 

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Thu Feb 14 12:58:12 2013
@@ -380,7 +380,7 @@ public class TestWALReplay {
     }
     // Now assert edits made it in.
     final Get g = new Get(rowName);
-    Result result = region.get(g, null);
+    Result result = region.get(g);
     assertEquals(countPerFamily * htd.getFamilies().size(),
       result.size());
     // Now close the region (without flush), split the log, reopen the region and assert that
@@ -395,7 +395,7 @@ public class TestWALReplay {
     // HRegionServer usually does this. It knows the largest seqid across all regions.
     wal2.setSequenceNumber(seqid2);
     assertTrue(seqid + result.size() < seqid2);
-    final Result result1b = region2.get(g, null);
+    final Result result1b = region2.get(g);
     assertEquals(result.size(), result1b.size());
 
     // Next test.  Add more edits, then 'crash' this region by stealing its wal
@@ -405,7 +405,7 @@ public class TestWALReplay {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
     }
     // Get count of edits.
-    final Result result2 = region2.get(g, null);
+    final Result result2 = region2.get(g);
     assertEquals(2 * result.size(), result2.size());
     wal2.sync();
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
@@ -432,7 +432,7 @@ public class TestWALReplay {
         long seqid3 = region3.initialize();
         // HRegionServer usually does this. It knows the largest seqid across all regions.
         wal3.setSequenceNumber(seqid3);
-        Result result3 = region3.get(g, null);
+        Result result3 = region3.get(g);
         // Assert that count of cells is same as before crash.
         assertEquals(result2.size(), result3.size());
         assertEquals(htd.getFamilies().size() * countPerFamily,
@@ -492,7 +492,7 @@ public class TestWALReplay {
 
     // Now assert edits made it in.
     final Get g = new Get(rowName);
-    Result result = region.get(g, null);
+    Result result = region.get(g);
     assertEquals(countPerFamily * htd.getFamilies().size(),
       result.size());
 
@@ -524,7 +524,7 @@ public class TestWALReplay {
     wal2.setSequenceNumber(seqid2);
     assertTrue(seqid + result.size() < seqid2);
 
-    final Result result1b = region2.get(g, null);
+    final Result result1b = region2.get(g);
     assertEquals(result.size(), result1b.size());
   }
 
@@ -557,8 +557,8 @@ public class TestWALReplay {
     }
 
     // Add a cache flush, shouldn't have any effect
-    long logSeqId = wal.startCacheFlush(regionName);
-    wal.completeCacheFlush(regionName, tableName, logSeqId, hri.isMetaRegion());
+    wal.startCacheFlush(regionName);
+    wal.completeCacheFlush(regionName);
 
     // Add an edit to another family, should be skipped.
     WALEdit edit = new WALEdit();
@@ -612,7 +612,7 @@ public class TestWALReplay {
           assertTrue(seqid > wal.getSequenceNumber());
 
           Get get = new Get(rowName);
-          Result result = region.get(get, -1);
+          Result result = region.get(get);
           // Make sure we only see the good edits
           assertEquals(countPerFamily * (htd.getFamilies().size() - 1),
             result.size());
@@ -661,7 +661,7 @@ public class TestWALReplay {
     wal.doCompleteCacheFlush = true;
     // allow complete cache flush with the previous seq number got after first
     // set of edits.
-    wal.completeCacheFlush(hri.getEncodedNameAsBytes(), hri.getTableName(), sequenceNumber, false);
+    wal.completeCacheFlush(hri.getEncodedNameAsBytes());
     wal.close();
     FileStatus[] listStatus = this.fs.listStatus(wal.getDir());
     HLogSplitter.splitLogFile(hbaseRootDir, listStatus[0], this.fs, this.conf,
@@ -686,12 +686,11 @@ public class TestWALReplay {
     }
 
     @Override
-    public void completeCacheFlush(byte[] encodedRegionName, byte[] tableName, long logSeqId,
-        boolean isMetaRegion) throws IOException {
+    public void completeCacheFlush(byte[] encodedRegionName) {
       if (!doCompleteCacheFlush) {
         return;
       }
-      super.completeCacheFlush(encodedRegionName, tableName, logSeqId, isMetaRegion);
+      super.completeCacheFlush(encodedRegionName);
     }
   }
 

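The recurring edits in TestWALReplay track two API trims: HRegion#get no longer takes a row-lock argument, and the HLog cache-flush calls need only the encoded region name. A before/after sketch under those signatures, with the removed forms reconstructed from the '-' lines above (helper names are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;

    static Result readRow(HRegion region, byte[] row) throws IOException {
      Get g = new Get(row);
      // before: region.get(g, null); the dropped argument was a row lock
      return region.get(g);
    }

    static void markFlush(HLog wal, byte[] encodedRegionName) {
      // before: long seqId = wal.startCacheFlush(name);
      //         wal.completeCacheFlush(name, tableName, seqId, isMetaRegion);
      wal.startCacheFlush(encodedRegionName);
      wal.completeCacheFlush(encodedRegionName);
    }
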
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java Thu Feb 14 12:58:12 2013
@@ -52,7 +52,7 @@ import org.junit.experimental.categories
 @Category(LargeTests.class)
 public class TestMasterReplication {
 
-  private static final Log LOG = LogFactory.getLog(TestReplication.class);
+  private static final Log LOG = LogFactory.getLog(TestMasterReplication.class);
 
   private Configuration conf1;
   private Configuration conf2;

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java Thu Feb 14 12:58:12 2013
@@ -47,7 +47,7 @@ import org.junit.experimental.categories
 @Category(LargeTests.class)
 public class TestMultiSlaveReplication {
 
-  private static final Log LOG = LogFactory.getLog(TestReplication.class);
+  private static final Log LOG = LogFactory.getLog(TestMultiSlaveReplication.class);
 
   private static Configuration conf1;
   private static Configuration conf2;

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationZookeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationZookeeper.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationZookeeper.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationZookeeper.java Thu Feb 14 12:58:12 2013
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.hbase.replication;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -28,6 +32,7 @@ import org.apache.hadoop.hbase.MediumTes
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
@@ -35,8 +40,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-
 @Category(MediumTests.class)
 public class TestReplicationZookeeper {
 
@@ -74,6 +77,24 @@ public class TestReplicationZookeeper {
     // HBASE-5586 used to get an NPE
     assertEquals(0, repZk.getSlavesAddresses("1").size());
   }
+
+  @Test
+  public void testIsPeerPath_PathToParentOfPeerNode() {
+    String peerParentNode = repZk.getPeersZNode();
+    assertFalse(repZk.isPeerPath(peerParentNode));
+  }
+
+  @Test
+  public void testIsPeerPath_PathToChildOfPeerNode() {
+    String peerChild = ZKUtil.joinZNode(ZKUtil.joinZNode(repZk.getPeersZNode(), "1"), "child");
+    assertFalse(repZk.isPeerPath(peerChild));
+  }
+
+  @Test
+  public void testIsPeerPath_ActualPeerPath() {
+    String peerPath = ZKUtil.joinZNode(repZk.getPeersZNode(), "1");
+    assertTrue(repZk.isPeerPath(peerPath));
+  }
 
   static class DummyServer implements Server {
 

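The three isPeerPath tests pin down the predicate's contract: only a direct child of the peers znode is a peer path, neither the parent node itself nor a grandchild. An illustrative reimplementation of that contract (not the ReplicationZookeeper source):

    // True only for <peersZNode>/<peerId>, exactly one level below the peers znode.
    static boolean isPeerPath(String peersZNode, String path) {
      if (!path.startsWith(peersZNode + "/")) {
        return false;  // the peers znode itself, or an unrelated path
      }
      String rest = path.substring(peersZNode.length() + 1);
      return !rest.isEmpty() && rest.indexOf('/') < 0;  // grandchildren are rejected
    }
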
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java Thu Feb 14 12:58:12 2013
@@ -116,6 +116,7 @@ public class TestMultiRowResource {
 
     Response response = client.get(path.toString(), Constants.MIMETYPE_JSON);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
 
     client.delete(row_5_url);
     client.delete(row_6_url);
@@ -142,6 +143,7 @@ public class TestMultiRowResource {
 
     Response response = client.get(path.toString(), Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
 
     client.delete(row_5_url);
     client.delete(row_6_url);

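From here on the REST tests all gain the same one-line check: a 200 response must also carry the Content-Type that was requested. The pattern distills to a helper like the following sketch, assuming the hbase-rest test Client and Response types used in these files (the helper name is hypothetical):

    import static org.junit.Assert.assertEquals;

    import java.io.IOException;

    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Response;

    static Response getAndCheckType(Client client, String path, String mimeType)
        throws IOException {
      Response response = client.get(path, mimeType);
      assertEquals(200, response.getCode());
      // the server must echo the MIME type it actually served
      assertEquals(mimeType, response.getHeader("content-type"));
      return response;
    }
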
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java Thu Feb 14 12:58:12 2013
@@ -210,6 +210,7 @@ public class TestRowResource {
       String value) throws IOException, JAXBException {
     Response response = getValueXML(table, row, column);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     CellSetModel cellSet = (CellSetModel)
       unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
     RowModel rowModel = cellSet.getRows().get(0);
@@ -222,6 +223,7 @@ public class TestRowResource {
       String column, String value) throws IOException, JAXBException {
     Response response = getValueXML(url);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     CellSetModel cellSet = (CellSetModel)
       unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
     RowModel rowModel = cellSet.getRows().get(0);
@@ -259,6 +261,7 @@ public class TestRowResource {
       String value) throws IOException {
     Response response = getValuePB(table, row, column);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
     CellSetModel cellSet = new CellSetModel();
     cellSet.getObjectFromMessage(response.getBody());
     RowModel rowModel = cellSet.getRows().get(0);
@@ -501,6 +504,7 @@ public class TestRowResource {
 
     response = client.get(path, Constants.MIMETYPE_BINARY);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
     assertTrue(Bytes.equals(response.getBody(), body));
     boolean foundTimestampHeader = false;
     for (Header header: response.getHeaders()) {
@@ -524,6 +528,7 @@ public class TestRowResource {
     Thread.yield();
     response = client.get(path, Constants.MIMETYPE_JSON);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
     response = deleteRow(TABLE, ROW_4);
     assertEquals(response.getCode(), 200);
   }
@@ -537,6 +542,7 @@ public class TestRowResource {
     Thread.yield();
     response = client.get(path, Constants.MIMETYPE_JSON);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
     response = deleteRow(TABLE, ROW_4);
     assertEquals(response.getCode(), 200);
 

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java Thu Feb 14 12:58:12 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 import static org.junit.Assert.*;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -124,6 +125,7 @@ public class TestScannerResource {
       response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
       assertTrue(response.getCode() == 200 || response.getCode() == 204);
       if (response.getCode() == 200) {
+        assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
         CellSetModel cellSet = new CellSetModel();
         cellSet.getObjectFromMessage(response.getBody());
         Iterator<RowModel> rows = cellSet.getRows().iterator();
@@ -207,6 +209,7 @@ public class TestScannerResource {
     // get a cell set
     response = client.get(scannerURI, Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     CellSetModel cellSet = (CellSetModel)
       unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
     // confirm batch size conformance
@@ -250,6 +253,7 @@ public class TestScannerResource {
     // get a cell set
     response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
     CellSetModel cellSet = new CellSetModel();
     cellSet.getObjectFromMessage(response.getBody());
     // confirm batch size conformance
@@ -292,6 +296,7 @@ public class TestScannerResource {
     // get a cell
     response = client.get(scannerURI, Constants.MIMETYPE_BINARY);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
     // verify that data was returned
     assertTrue(response.getBody().length > 0);
     // verify that the expected X-headers are present

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java Thu Feb 14 12:58:12 2013
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 import static org.junit.Assert.*;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -216,7 +217,7 @@ public class TestScannersWithFilters {
     marshaller.marshal(model, writer);
     LOG.debug(writer.toString());
     byte[] body = Bytes.toBytes(writer.toString());
-    Response response = client.put("/" + TABLE + "/scanner", 
+    Response response = client.put("/" + TABLE + "/scanner",
       Constants.MIMETYPE_XML, body);
     assertEquals(response.getCode(), 201);
     String scannerURI = response.getLocation();
@@ -225,6 +226,7 @@ public class TestScannersWithFilters {
     // get a cell set
     response = client.get(scannerURI, Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     CellSetModel cells = (CellSetModel)
       unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
 
@@ -250,7 +252,7 @@ public class TestScannersWithFilters {
     marshaller.marshal(model, writer);
     LOG.debug(writer.toString());
     byte[] body = Bytes.toBytes(writer.toString());
-    Response response = client.put("/" + TABLE + "/scanner", 
+    Response response = client.put("/" + TABLE + "/scanner",
       Constants.MIMETYPE_XML, body);
     assertEquals(response.getCode(), 201);
     String scannerURI = response.getLocation();
@@ -259,6 +261,7 @@ public class TestScannersWithFilters {
     // get a cell set
     response = client.get(scannerURI, Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     CellSetModel cellSet = (CellSetModel)
       unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
 
@@ -312,6 +315,7 @@ public class TestScannersWithFilters {
     // get a cell set
     response = client.get(scannerURI, Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     CellSetModel cellSet = (CellSetModel)
       unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
 

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java Thu Feb 14 12:58:12 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 import static org.junit.Assert.*;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -109,6 +110,7 @@ public class TestSchemaResource {
     // retrieve the schema and validate it
     response = client.get(schemaPath, Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     model = fromXML(response.getBody());
     TestTableSchemaModel.checkModel(model, TABLE1);
 
@@ -147,6 +149,15 @@ public class TestSchemaResource {
     // retrieve the schema and validate it
     response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
+    model = new TableSchemaModel();
+    model.getObjectFromMessage(response.getBody());
+    TestTableSchemaModel.checkModel(model, TABLE2);
+
+    // retrieve the schema and validate it with alternate pbuf type
+    response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF_IETF);
+    assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
     model = new TableSchemaModel();
     model.getObjectFromMessage(response.getBody());
     TestTableSchemaModel.checkModel(model, TABLE2);

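This file and the table, status, and version tests below also start fetching each resource twice, once per protobuf MIME type. Both types are assumed to select the same wire encoding, so the same model decodes either body; a sketch under that assumption (the helper name is hypothetical):

    import static org.junit.Assert.assertEquals;

    import java.io.IOException;

    import org.apache.hadoop.hbase.rest.Constants;
    import org.apache.hadoop.hbase.rest.client.Client;
    import org.apache.hadoop.hbase.rest.client.Response;
    import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

    static void checkSchemaBothProtobufTypes(Client client, String schemaPath)
        throws IOException {
      for (String mime : new String[] {
          Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) {
        Response response = client.get(schemaPath, mime);
        assertEquals(200, response.getCode());
        assertEquals(mime, response.getHeader("content-type"));
        TableSchemaModel model = new TableSchemaModel();
        model.getObjectFromMessage(response.getBody());  // same bytes either way
      }
    }
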
Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java Thu Feb 14 12:58:12 2013
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 import static org.junit.Assert.*;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -94,6 +95,7 @@ public class TestStatusResource {
   public void testGetClusterStatusXML() throws IOException, JAXBException {
     Response response = client.get("/status/cluster", Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     StorageClusterStatusModel model = (StorageClusterStatusModel)
       context.createUnmarshaller().unmarshal(
         new ByteArrayInputStream(response.getBody()));
@@ -102,13 +104,17 @@ public class TestStatusResource {
 
   @Test
   public void testGetClusterStatusPB() throws IOException {
-    Response response = client.get("/status/cluster", 
-      Constants.MIMETYPE_PROTOBUF);
+    Response response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
     StorageClusterStatusModel model = new StorageClusterStatusModel();
     model.getObjectFromMessage(response.getBody());
     validate(model);
+    response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF_IETF);
+    assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+    model = new StorageClusterStatusModel();
+    model.getObjectFromMessage(response.getBody());
+    validate(model);
   }
-
 }
-

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java Thu Feb 14 12:58:12 2013
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.util.StringUtils;
 
 import static org.junit.Assert.*;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -178,12 +179,14 @@ public class TestTableResource {
   public void testTableListText() throws IOException {
     Response response = client.get("/", Constants.MIMETYPE_TEXT);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
   }
 
   @Test
   public void testTableListXML() throws IOException, JAXBException {
     Response response = client.get("/", Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     TableListModel model = (TableListModel)
       context.createUnmarshaller()
         .unmarshal(new ByteArrayInputStream(response.getBody()));
@@ -194,29 +197,37 @@ public class TestTableResource {
   public void testTableListJSON() throws IOException {
     Response response = client.get("/", Constants.MIMETYPE_JSON);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
   }
 
   @Test
   public void testTableListPB() throws IOException, JAXBException {
     Response response = client.get("/", Constants.MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
     TableListModel model = new TableListModel();
     model.getObjectFromMessage(response.getBody());
     checkTableList(model);
+    response = client.get("/", Constants.MIMETYPE_PROTOBUF_IETF);
+    assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+    model = new TableListModel();
+    model.getObjectFromMessage(response.getBody());
+    checkTableList(model);
   }
 
   @Test
   public void testTableInfoText() throws IOException {
-    Response response = client.get("/" + TABLE + "/regions",
-      Constants.MIMETYPE_TEXT);
+    Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_TEXT);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
   }
 
   @Test
   public void testTableInfoXML() throws IOException, JAXBException {
-    Response response = client.get("/" + TABLE + "/regions", 
-      Constants.MIMETYPE_XML);
+    Response response = client.get("/" + TABLE + "/regions",  Constants.MIMETYPE_XML);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     TableInfoModel model = (TableInfoModel)
       context.createUnmarshaller()
         .unmarshal(new ByteArrayInputStream(response.getBody()));
@@ -225,19 +236,25 @@ public class TestTableResource {
 
   @Test
   public void testTableInfoJSON() throws IOException {
-    Response response = client.get("/" + TABLE + "/regions", 
-      Constants.MIMETYPE_JSON);
+    Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_JSON);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
   }
 
   @Test
   public void testTableInfoPB() throws IOException, JAXBException {
-    Response response = client.get("/" + TABLE + "/regions",
-      Constants.MIMETYPE_PROTOBUF);
+    Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
     TableInfoModel model = new TableInfoModel();
     model.getObjectFromMessage(response.getBody());
     checkTableInfo(model);
+    response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF_IETF);
+    assertEquals(response.getCode(), 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+    model = new TableInfoModel();
+    model.getObjectFromMessage(response.getBody());
+    checkTableInfo(model);
   }
 
 }

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java Thu Feb 14 12:58:12 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.rest.mode
 import org.apache.hadoop.hbase.util.Bytes;
 
 import static org.junit.Assert.*;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -96,6 +97,7 @@ public class TestVersionResource {
   public void testGetStargateVersionText() throws IOException {
     Response response = client.get("/version", Constants.MIMETYPE_TEXT);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
     String body = Bytes.toString(response.getBody());
     assertTrue(body.length() > 0);
     assertTrue(body.contains(RESTServlet.VERSION_STRING));
@@ -113,6 +115,7 @@ public class TestVersionResource {
   public void testGetStargateVersionXML() throws IOException, JAXBException {
     Response response = client.get("/version", Constants.MIMETYPE_XML);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     VersionModel model = (VersionModel)
       context.createUnmarshaller().unmarshal(
         new ByteArrayInputStream(response.getBody()));
@@ -124,23 +127,30 @@ public class TestVersionResource {
   public void testGetStargateVersionJSON() throws IOException {
     Response response = client.get("/version", Constants.MIMETYPE_JSON);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
   }
 
   @Test
   public void testGetStargateVersionPB() throws IOException {
     Response response = client.get("/version", Constants.MIMETYPE_PROTOBUF);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type"));
     VersionModel model = new VersionModel();
     model.getObjectFromMessage(response.getBody());
     validate(model);
-    LOG.info("success retrieving Stargate version as protobuf");
+    response = client.get("/version", Constants.MIMETYPE_PROTOBUF_IETF);
+    assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type"));
+    model = new VersionModel();
+    model.getObjectFromMessage(response.getBody());
+    validate(model);
   }
 
   @Test
   public void testGetStorageClusterVersionText() throws IOException {
-    Response response = client.get("/version/cluster", 
-      Constants.MIMETYPE_TEXT);
+    Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type"));
   }
 
   @Test
@@ -148,6 +158,7 @@ public class TestVersionResource {
       JAXBException {
     Response response = client.get("/version/cluster",Constants.MIMETYPE_XML);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
     StorageClusterVersionModel clusterVersionModel = 
       (StorageClusterVersionModel)
         context.createUnmarshaller().unmarshal(
@@ -161,6 +172,7 @@ public class TestVersionResource {
   public void doTestGetStorageClusterVersionJSON() throws IOException {
     Response response = client.get("/version/cluster", Constants.MIMETYPE_JSON);
     assertTrue(response.getCode() == 200);
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
   }
 
 }

Modified: hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java Thu Feb 14 12:58:12 2013
@@ -31,7 +31,8 @@ public class SecureTestUtil {
     conf.set("hadoop.security.authorization", "false");
     conf.set("hadoop.security.authentication", "simple");
     conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
-    conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
+    conf.set("hbase.coprocessor.region.classes", AccessController.class.getName()+
+        ","+SecureBulkLoadEndpoint.class.getName());
     // add the process running user to superusers
     String currentUser = User.getCurrent().getName();
     conf.set("hbase.superuser", "admin,"+currentUser);