Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC

svn commit: r1576909 [17/18] - in /hbase/branches/0.89-fb/src: ./ examples/thrift/ main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/avro/ main/java/org/apache/hadoop/hbase/avro/generated/ main/java/org/apache/hadoop/hbase/client/ ma...

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQOS.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQOS.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQOS.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQOS.java Wed Mar 12 21:17:13 2014
@@ -124,255 +124,3 @@ public class TestQOS {
     }
   }
 }
-package org.apache.hadoop.hbase.regionserver;
-
-import java.util.Arrays;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.util.InjectionEvent;
-import org.apache.hadoop.util.InjectionEventI;
-import org.apache.hadoop.util.InjectionHandler;
-import org.apache.hadoop.util.NativeCodeLoader;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.io.nativeio.NativeIO;
-
-import org.junit.AfterClass;
-import static org.junit.Assert.*;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestQOS {
-  private static final Log LOG = LogFactory.getLog(TestQOS.class);
-  protected static HBaseTestingUtility TEST_UTIL;
-  private static volatile int classOfServiceR;
-  private static volatile int classOfServiceW;
-  private static volatile int priorityW;
-  private static volatile int priorityR;
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    conf.setBoolean(HConstants.HBASE_ENABLE_QOS_KEY, true);
-    TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(1);
-    InjectionHandler.set(new TestQOSHandler());
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  public static class TestQOSHandler extends
-      InjectionHandler {
-    protected void _processEvent(InjectionEventI event, Object... args) {
-      if (event == InjectionEvent.DATANODE_WRITE_BLOCK
-          || event == InjectionEvent.DATANODE_READ_BLOCK) {
-        if (NativeCodeLoader.isNativeCodeLoaded()) {
-          try {
-            int value = NativeIO.ioprioGetIfPossible();
-            // See ioprio.h for explanation on the following lines
-            if (event ==  InjectionEvent.DATANODE_READ_BLOCK) {
-              priorityR = value & ((1 << 13) - 1);
-              classOfServiceR = value >> 13;
-            } else {
-              priorityW = value & ((1 << 13) - 1);
-              classOfServiceW = value >> 13;
-            }
-          } catch (Exception e) {
-
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testBasic() throws Exception {
-    byte[] family = "family".getBytes();
-    HTable table = TEST_UTIL.createTable("test".getBytes(), family);
-    TEST_UTIL.loadTable2(table, family);
-    HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
-    byte[] regionName = null;
-    for (HRegion region : server.getOnlineRegions()) {
-      if (!new String(region.getTableDesc().getName()).equals(new String(table
-          .getTableName()))) {
-        LOG.info("Skipping since name is : "
-            + new String(region.getTableDesc().getName()));
-        continue;
-      }
-      Store store = region.getStore(family);
-      region.flushcache();
-      regionName = region.getRegionName();
-      if (NativeCodeLoader.isNativeCodeLoaded()) {
-        LOG.info("Verifying priorities");
-        assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceW);
-        assertEquals(HConstants.FLUSH_PRIORITY, priorityW);
-      }
-      store.compactRecentForTesting(-1);
-      if (NativeCodeLoader.isNativeCodeLoaded()) {
-        assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceW);
-        assertEquals(HConstants.COMPACT_PRIORITY, priorityW);
-      }
-    }
-
-    // Roll the log so that we start a new block for the HLog and
-    // capture the IOPRIO injection.
-    server.getLog(0).rollWriter();
-    Put p = new Put("row".getBytes());
-    p.add("family".getBytes(), "qualifier".getBytes(), "value".getBytes());
-    table.put(p);
-    table.flushCommits();
-    LOG.info("DONE WAL WRITE");
-    if (NativeCodeLoader.isNativeCodeLoaded()) {
-      assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceW);
-      assertEquals(HConstants.HLOG_PRIORITY, priorityW);
-    }
-    // Flush the region so that the read hits the datanode.
-    server.flushRegion(regionName);
-    LOG.info("FINISHED FLUSH");
-    Get g = new Get("row".getBytes());
-    g.addColumn("family".getBytes(), "qualifier".getBytes());
-    Result r = table.get(g);
-    LOG.info("FINISHED GET");
-    assertTrue(Arrays.equals("value".getBytes(),
-        r.getFamilyMap("family".getBytes()).get("qualifier".getBytes())));
-    if (NativeCodeLoader.isNativeCodeLoaded()) {
-      assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceR);
-      assertEquals(HConstants.PREAD_PRIORITY, priorityR);
-    }
-  }
-}
-package org.apache.hadoop.hbase.regionserver;
-
-import java.util.Arrays;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.util.InjectionEvent;
-import org.apache.hadoop.util.InjectionEventI;
-import org.apache.hadoop.util.InjectionHandler;
-import org.apache.hadoop.util.NativeCodeLoader;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.io.nativeio.NativeIO;
-
-import org.junit.AfterClass;
-import static org.junit.Assert.*;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestQOS {
-  private static final Log LOG = LogFactory.getLog(TestQOS.class);
-  protected static HBaseTestingUtility TEST_UTIL;
-  private static volatile int classOfServiceR;
-  private static volatile int classOfServiceW;
-  private static volatile int priorityW;
-  private static volatile int priorityR;
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    conf.setBoolean(HConstants.HBASE_ENABLE_QOS_KEY, true);
-    TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(1);
-    InjectionHandler.set(new TestQOSHandler());
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  public static class TestQOSHandler extends
-      InjectionHandler {
-    protected void _processEvent(InjectionEventI event, Object... args) {
-      if (event == InjectionEvent.DATANODE_WRITE_BLOCK
-          || event == InjectionEvent.DATANODE_READ_BLOCK) {
-        if (NativeCodeLoader.isNativeCodeLoaded()) {
-          try {
-            int value = NativeIO.ioprioGetIfPossible();
-            // See ioprio.h for explanation on the following lines
-            if (event ==  InjectionEvent.DATANODE_READ_BLOCK) {
-              priorityR = value & ((1 << 13) - 1);
-              classOfServiceR = value >> 13;
-            } else {
-              priorityW = value & ((1 << 13) - 1);
-              classOfServiceW = value >> 13;
-            }
-          } catch (Exception e) {
-
-          }
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testBasic() throws Exception {
-    byte[] family = "family".getBytes();
-    HTable table = TEST_UTIL.createTable("test".getBytes(), family);
-    TEST_UTIL.loadTable2(table, family);
-    HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
-    byte[] regionName = null;
-    for (HRegion region : server.getOnlineRegions()) {
-      if (!new String(region.getTableDesc().getName()).equals(new String(table
-          .getTableName()))) {
-        LOG.info("Skipping since name is : "
-            + new String(region.getTableDesc().getName()));
-        continue;
-      }
-      Store store = region.getStore(family);
-      region.flushcache();
-      regionName = region.getRegionName();
-      if (NativeCodeLoader.isNativeCodeLoaded()) {
-        LOG.info("Verifying priorities");
-        assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceW);
-        assertEquals(HConstants.FLUSH_PRIORITY, priorityW);
-      }
-      store.compactRecentForTesting(-1);
-      if (NativeCodeLoader.isNativeCodeLoaded()) {
-        assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceW);
-        assertEquals(HConstants.COMPACT_PRIORITY, priorityW);
-      }
-    }
-
-    // Roll the log so that we start a new block for the HLog and
-    // capture the IOPRIO injection.
-    server.getLog(0).rollWriter();
-    Put p = new Put("row".getBytes());
-    p.add("family".getBytes(), "qualifier".getBytes(), "value".getBytes());
-    table.put(p);
-    table.flushCommits();
-    LOG.info("DONE WAL WRITE");
-    if (NativeCodeLoader.isNativeCodeLoaded()) {
-      assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceW);
-      assertEquals(HConstants.HLOG_PRIORITY, priorityW);
-    }
-    // Flush the region so that the read hits the datanode.
-    server.flushRegion(regionName);
-    LOG.info("FINISHED FLUSH");
-    Get g = new Get("row".getBytes());
-    g.addColumn("family".getBytes(), "qualifier".getBytes());
-    Result r = table.get(g);
-    LOG.info("FINISHED GET");
-    assertTrue(Arrays.equals("value".getBytes(),
-        r.getFamilyMap("family".getBytes()).get("qualifier".getBytes())));
-    if (NativeCodeLoader.isNativeCodeLoaded()) {
-      assertEquals(HConstants.IOPRIO_CLASSOF_SERVICE, classOfServiceR);
-      assertEquals(HConstants.PREAD_PRIORITY, priorityR);
-    }
-  }
-}

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java Wed Mar 12 21:17:13 2014
@@ -53,6 +53,7 @@ public class TestQueryMatcher extends HB
   long ttl = Long.MAX_VALUE;
   KeyComparator rowComparator;
   private Scan scan;
+  public static final boolean USE_BLOOM_DEL_COLFILTER = false;
 
   public void setUp() throws Exception {
     super.setUp();
@@ -97,7 +98,7 @@ public class TestQueryMatcher extends HB
     // 2,4,5
     ScanQueryMatcher qm =
         new ScanQueryMatcher(scan, fam2, get.getFamilyMap().get(fam2),
-            rowComparator, 1, EnvironmentEdgeManager.currentTimeMillis() - ttl);
+            rowComparator, 1, EnvironmentEdgeManager.currentTimeMillis() - ttl, USE_BLOOM_DEL_COLFILTER);
 
     List<KeyValue> memstore = new ArrayList<KeyValue>();
     memstore.add(new KeyValue(row1, fam2, col1, 1, data));
@@ -112,7 +113,7 @@ public class TestQueryMatcher extends HB
     qm.setRow(memstore.get(0).getRow());
 
     for (KeyValue kv : memstore){
-      actual.add(qm.match(kv));
+      actual.add(qm.match(kv, null));
     }
 
     assertEquals(expected.size(), actual.size());
@@ -141,7 +142,7 @@ public class TestQueryMatcher extends HB
     expected.add(ScanQueryMatcher.MatchCode.DONE);
 
     ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2, null, rowComparator,
-        1, EnvironmentEdgeManager.currentTimeMillis() - ttl);
+        1, EnvironmentEdgeManager.currentTimeMillis() - ttl, USE_BLOOM_DEL_COLFILTER);
 
     List<KeyValue> memstore = new ArrayList<KeyValue>();
     memstore.add(new KeyValue(row1, fam2, col1, 1, data));
@@ -156,7 +157,7 @@ public class TestQueryMatcher extends HB
     qm.setRow(memstore.get(0).getRow());
 
     for(KeyValue kv : memstore) {
-      actual.add(qm.match(kv));
+      actual.add(qm.match(kv, null));
     }
 
     assertEquals(expected.size(), actual.size());
@@ -194,7 +195,7 @@ public class TestQueryMatcher extends HB
     ScanQueryMatcher qm =
         new ScanQueryMatcher(scan, fam2, get.getFamilyMap().get(fam2),
             rowComparator, 1, EnvironmentEdgeManager.currentTimeMillis()
-                - testTTL);
+                - testTTL, USE_BLOOM_DEL_COLFILTER);
 
     long now = System.currentTimeMillis();
     KeyValue [] kvs = new KeyValue[] {
@@ -210,7 +211,7 @@ public class TestQueryMatcher extends HB
 
     List<MatchCode> actual = new ArrayList<MatchCode>(kvs.length);
     for (KeyValue kv : kvs) {
-      actual.add( qm.match(kv) );
+      actual.add( qm.match(kv, null) );
     }
 
     assertEquals(expected.length, actual.size());
@@ -247,7 +248,7 @@ public class TestQueryMatcher extends HB
 
     ScanQueryMatcher qm =
         new ScanQueryMatcher(scan, fam2, null, rowComparator, 1,
-            EnvironmentEdgeManager.currentTimeMillis() - testTTL);
+            EnvironmentEdgeManager.currentTimeMillis() - testTTL, USE_BLOOM_DEL_COLFILTER);
 
     long now = System.currentTimeMillis();
     KeyValue [] kvs = new KeyValue[] {
@@ -262,7 +263,7 @@ public class TestQueryMatcher extends HB
 
     List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>(kvs.length);
     for (KeyValue kv : kvs) {
-      actual.add( qm.match(kv) );
+      actual.add( qm.match(kv, null) );
     }
 
     assertEquals(expected.length, actual.size());

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerResets.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerResets.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerResets.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerResets.java Wed Mar 12 21:17:13 2014
@@ -135,7 +135,7 @@ public class TestScannerResets extends H
     assertEquals("ending index", idx, maxExclusiveTS);
 
     long blocksEnd = getBlkAccessCount(cf);
-
+    
     assertEquals("Blocks Read Check: ", expBlocks, blocksEnd - blocksStart);
     System.out.println("Blocks Read = "
         + (blocksEnd - blocksStart) + "Expected = " + expBlocks);
@@ -156,7 +156,7 @@ public class TestScannerResets extends H
 
     // File2: Timestamp range of keys [4..8]
     putData(FAMILY, "row4", "col4", 4);
-    putData(FAMILY, "row5", "col5", 5);
+    putData(FAMILY, "row5", "col5", 5);    
     putData(FAMILY, "row6", "col6", 6);
     putData(FAMILY, "row7", "col7", 7);
     putData(FAMILY, "row8", "col8", 8);

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java Wed Mar 12 21:17:13 2014
@@ -54,11 +54,11 @@ public class TestSplitLogWorker {
       new HBaseTestingUtility();
   private ZooKeeperWrapper zkw;
   private SplitLogWorker slw;
-
+  
   private interface Expr {
     public long eval();
   }
-
+  
   private void waitForCounter(final AtomicLong ctr, long oldval, long newval,
       long timems) {
     Expr e = new Expr() {
@@ -69,7 +69,7 @@ public class TestSplitLogWorker {
     waitForCounter(e, oldval, newval, timems);
     return;
   }
-
+ 
   private void waitForCounter(Expr e, long oldval, long newval,
       long timems) {
     long curt = System.currentTimeMillis();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java Wed Mar 12 21:17:13 2014
@@ -43,13 +43,13 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Tests the block cache summary functionality in StoreFile,
+ * Tests the block cache summary functionality in StoreFile, 
  * which contains the BlockCache
  *
  */
 public class TestStoreFileBlockCacheSummary {
   final Log LOG = LogFactory.getLog(getClass());
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();  
   private static final String TEST_TABLE = "testTable";
   private static final String TEST_TABLE2 = "testTable2";
   private static final String TEST_CF = "testFamily";
@@ -58,7 +58,7 @@ public class TestStoreFileBlockCacheSumm
   private static byte [] VALUE = Bytes.toBytes("testValue");
 
   private final int TOTAL_ROWS = 4;
-
+  
   /**
    * @throws java.lang.Exception exception
    */
@@ -74,14 +74,14 @@ public class TestStoreFileBlockCacheSumm
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
-
+  
 
   private Put createPut(byte[] family, String row) {
     Put put = new Put( Bytes.toBytes(row));
     put.add(family, QUALIFIER, VALUE);
     return put;
   }
-
+  
   /**
   * This test inserts data into multiple tables and then reads both tables to ensure
   * they are in the block cache.
@@ -97,18 +97,18 @@ public class TestStoreFileBlockCacheSumm
    addRows(ht2, FAMILY);
 
    TEST_UTIL.flush();
-
+   
    scan(ht, FAMILY);
    scan(ht2, FAMILY);
-
+      
    BlockCache bc =
      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
-   List<BlockCacheColumnFamilySummary> bcs =
+   List<BlockCacheColumnFamilySummary> bcs = 
      bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
    LOG.info("blockCacheSummary: " + bcs);
 
    assertEquals("blockCache summary has entries", 3, bcs.size());
-
+   
    BlockCacheColumnFamilySummary e = bcs.get(0);
    assertEquals("table", "-ROOT-", e.getTable());
    assertEquals("cf", "info", e.getColumnFamily());
@@ -126,7 +126,7 @@ public class TestStoreFileBlockCacheSumm
  }
 
  private void addRows(HTable ht, byte[] family) throws IOException {
-
+ 
    for (int i = 0; i < TOTAL_ROWS;i++) {
      ht.put(createPut(family, "row" + i));
    }
@@ -135,7 +135,7 @@ public class TestStoreFileBlockCacheSumm
  private void scan(HTable ht, byte[] family) throws IOException {
    Scan scan = new Scan();
    scan.addColumn(family, QUALIFIER);
-
+   
    int count = 0;
    for(@SuppressWarnings("unused") Result result : ht.getScanner(scan)) {
      count++;

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java Wed Mar 12 21:17:13 2014
@@ -131,7 +131,7 @@ public class TestWideScanner extends HBa
         }
 
         results.clear();
-
+        
         // trigger ChangedReadersObservers
         Iterator<KeyValueScanner> scanners =
             ((RegionScanner)s).storeHeap.getHeap().iterator();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestRpcMetricWrapper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestRpcMetricWrapper.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestRpcMetricWrapper.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestRpcMetricWrapper.java Wed Mar 12 21:17:13 2014
@@ -106,6 +106,6 @@ public class TestRpcMetricWrapper extend
     assertEquals(1, dummyMetricsRecord.getMetric("foo_min").longValue());
     assertEquals(100, dummyMetricsRecord.getMetric("foo_max").longValue());
     assertEquals(100, dummyMetricsRecord.getMetric("foo_num_ops").longValue());
-    assertEquals(95, dummyMetricsRecord.getMetric("foo_p95").longValue());
+    assertEquals(96, dummyMetricsRecord.getMetric("foo_p95").longValue());
   }
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java Wed Mar 12 21:17:13 2014
@@ -78,7 +78,7 @@ public class HBaseRESTClusterTestBase ex
     server.start();
       // get the port
     testServletPort = server.getConnectors()[0].getLocalPort();
-
+    
     LOG.info("started " + server.getClass().getName() + " on port " + 
       testServletPort);
   }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java Wed Mar 12 21:17:13 2014
@@ -128,7 +128,7 @@ public class TestRowResource extends HBa
     return response;
   }
 
-  Response getValuePB(String table, String row, String column)
+  Response getValuePB(String table, String row, String column) 
       throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java Wed Mar 12 21:17:13 2014
@@ -193,7 +193,7 @@ public class TestScannersWithFilters ext
     super.tearDown();
   }
 
-  void verifyScan(Scan s, long expectedRows, long expectedKeys)
+  void verifyScan(Scan s, long expectedRows, long expectedKeys) 
       throws Exception {
     ScannerModel model = ScannerModel.fromScan(s);
     model.setBatch(Integer.MAX_VALUE); // fetch it all at once
@@ -279,7 +279,7 @@ public class TestScannersWithFilters ext
       kvs.length, idx);
   }
 
-  void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys)
+  void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) 
       throws Exception {
     ScannerModel model = ScannerModel.fromScan(s);
     model.setBatch(Integer.MAX_VALUE); // fetch it all at once
@@ -964,7 +964,7 @@ public class TestScannersWithFilters ext
     };
     verifyScanFull(s, kvs);
   }
-
+  
   public void testScannersWithFilters() throws Exception {
     doTestNoFilter();
     doTestPrefixFilter();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java Wed Mar 12 21:17:13 2014
@@ -208,7 +208,7 @@ public class TestTableResource extends H
   }
 
   void doTestTableInfoPB() throws IOException, JAXBException {
-    Response response =
+    Response response = 
       client.get("/" + TABLE + "/regions", MIMETYPE_PROTOBUF);
     assertEquals(response.getCode(), 200);
     TableInfoModel model = new TableInfoModel();

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java Wed Mar 12 21:17:13 2014
@@ -117,13 +117,13 @@ public class TestCallQueue {
   }
 
   private static void verifyMetrics(ThriftMetrics metrics, String name, int expectValue)
-      throws Exception {
-    MetricsContext context = MetricsUtil.getContext(
-        ThriftMetrics.CONTEXT_NAME);
-    metrics.doUpdates(context);
-    OutputRecord record = context.getAllRecords().get(
-        ThriftMetrics.CONTEXT_NAME).iterator().next();
-    assertEquals(expectValue, record.getMetric(name).intValue());
+      throws Exception { 
+    MetricsContext context = MetricsUtil.getContext( 
+        ThriftMetrics.CONTEXT_NAME); 
+    metrics.doUpdates(context); 
+    OutputRecord record = context.getAllRecords().get( 
+        ThriftMetrics.CONTEXT_NAME).iterator().next(); 
+    assertEquals(expectValue, record.getMetric(name).intValue()); 
   }
 
   private static Runnable createDummyRunnable() {
@@ -134,3 +134,4 @@ public class TestCallQueue {
     };
   }
 }
+

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHBCpp.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHBCpp.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHBCpp.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHBCpp.java Wed Mar 12 21:17:13 2014
@@ -76,10 +76,8 @@ public class TestHBCpp {
    */
   public void testSimpleClient() throws Exception {
     // Allow the developer to override the default fbcode build location.
-    String fbcodeDir = System.getenv("FBCODE_DIR");
-    if (fbcodeDir == null) {
-      fbcodeDir = "/home/engshare/contbuild/fbcode/hbase";
-    }
+    String fbcodeDir = System.getProperty("fbcode.root",
+        "/home/engshare/contbuild/fbcode/hbase");
 
     executeCommand(new String[] {
         fbcodeDir + "/_bin/hbase/hbcpp/SimpleClient",

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHeaderSendReceive.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHeaderSendReceive.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHeaderSendReceive.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHeaderSendReceive.java Wed Mar 12 21:17:13 2014
@@ -27,17 +27,15 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.ipc.ProfilingData;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.jruby.RubyProcess.Sys;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 /**
- * Chech whether we correctly receive profiling data with the header protocol enabled
+ * Check whether we correctly receive profiling data with the header protocol enabled
  *
  */
 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestNativeThriftClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestNativeThriftClient.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestNativeThriftClient.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestNativeThriftClient.java Wed Mar 12 21:17:13 2014
@@ -38,10 +38,9 @@ public class TestNativeThriftClient {
     // Spawn the current version of client unit tests from fbcode.
     // Allow the developer to override
     // the default fbcode build location.
-    String fbcodeDir = System.getenv("FBCODE_DIR");
-    if (fbcodeDir == null) {
-      fbcodeDir = "/home/engshare/contbuild/fbcode/hbase";
-    }
+    String fbcodeDir = System.getProperty("fbcode.root",
+        "/home/engshare/contbuild/fbcode/hbase");
+
     executeCommand(new String[] {
       fbcodeDir + "/_bin/hbase/src/testing/native_thrift",
       "--hbase",

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java Wed Mar 12 21:17:13 2014
@@ -355,7 +355,7 @@ public class TestThriftServerLegacy exte
 
   /**
    * Tests some of the getRows*() calls.
-   *
+   * 
    * @throws Exception
    */
   public void doTestTableMultiGet() throws Exception {
@@ -397,7 +397,7 @@ public class TestThriftServerLegacy exte
 
   /**
    * Test some of the checkAndMutate calls
-   *
+   * 
    * @throws Exception
    */
   public void doTestTableCheckAndMutate() throws Exception {
@@ -458,7 +458,7 @@ public class TestThriftServerLegacy exte
   }
 
   /**
-   *
+   * 
    * @return a List of Mutations for a row, with columnA having valueC and
    *         columnB having valueD
    */

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestHRegionInterfaceSimpleFunctions.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestHRegionInterfaceSimpleFunctions.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestHRegionInterfaceSimpleFunctions.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestHRegionInterfaceSimpleFunctions.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,242 @@
+package org.apache.hadoop.hbase.thrift.swift;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.executor.HBaseEventHandler;
+import org.apache.hadoop.hbase.executor.RegionTransitionEventData;
+import org.apache.hadoop.hbase.ipc.HBaseRPCOptions;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.ipc.ThriftHRegionInterface;
+import org.apache.hadoop.hbase.ipc.thrift.HBaseThriftRPC;
+import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.MapWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.zookeeper.data.Stat;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestHRegionInterfaceSimpleFunctions {
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final int SLAVES = 1;
+  private static final byte[] TABLENAME =
+      Bytes.toBytes("testSimpleHRegionInterfaceFunctions");
+  private static final byte[] FAMILY = Bytes.toBytes("testFamily");
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test(timeout=180000)
+  public void testSimpleHRegionInterfaceFunctions()
+      throws IOException, InterruptedException, ExecutionException, TimeoutException, ThriftHBaseException {
+    HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);
+    HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+    ThriftHRegionInterface thriftServer = TEST_UTIL.getHBaseCluster().getThriftRegionServer(0);
+    TEST_UTIL.loadTable(table, FAMILY);
+
+    HServerInfo info = server.getHServerInfo();
+    HRegionInfo regionInfo = null;
+    for (HRegion r : server.getOnlineRegions()) {
+      if (Bytes.BYTES_COMPARATOR.compare(r.getTableDesc().getName(),
+          TABLENAME) == 0) {
+        // Since we have only 1 slave here we can afford to do this
+        regionInfo = r.getRegionInfo();
+        break;
+      }
+    }
+
+    InetSocketAddress addr =
+        new InetSocketAddress(info.getHostname(), server.getThriftServerPort());
+    HRegionInterface client = (HRegionInterface) HBaseThriftRPC
+        .getClient(addr, TEST_UTIL.getConfiguration(), ThriftHRegionInterface.class, HBaseRPCOptions.DEFAULT);
+
+    // tGetClosestRowBefore
+    Result r = null;
+    r = client.getClosestRowBefore(regionInfo.getRegionName(),
+        Bytes.toBytes("abfd"), FAMILY);
+    assertTrue(Bytes.BYTES_COMPARATOR
+        .compare(Bytes.toBytes("abf"), r.getValue(FAMILY, null)) == 0);
+
+    // tGetCurrentTimeMillis
+    long currentTime = client.getCurrentTimeMillis();
+    assertTrue(currentTime <= EnvironmentEdgeManager.currentTimeMillis());
+
+    // tGetLastFlushTime
+    long lastFlushTime = client.getLastFlushTime(regionInfo.getRegionName());
+    assertTrue(lastFlushTime <= EnvironmentEdgeManager.currentTimeMillis());
+
+    // tFlushRegion
+    client.flushRegion(regionInfo.getRegionName());
+    long curTime = System.currentTimeMillis();
+    while (System.currentTimeMillis() - curTime < (180*1000)) {
+      long curFlushTime = client.getLastFlushTime(regionInfo.getRegionName());
+      if (curFlushTime > lastFlushTime) {
+        lastFlushTime = curFlushTime;
+        break;
+      }
+    }
+
+    // tFlushRegionIfOlderThanTS
+    client.flushRegion(regionInfo.getRegionName(),
+        EnvironmentEdgeManager.currentTimeMillis());
+    curTime = System.currentTimeMillis();
+    while (System.currentTimeMillis() - curTime < (180*1000)) {
+      long curFlushTime = client.getLastFlushTime(regionInfo.getRegionName());
+      if (curFlushTime > lastFlushTime) {
+        lastFlushTime = curFlushTime;
+        break;
+      }
+    }
+
+    // tGetLastFlushTimes
+    MapWritable flushTimes = client.getLastFlushTimes();
+    assertEquals(3, flushTimes.size());
+    for (Entry<Writable, Writable> entry : flushTimes.entrySet()) {
+      if (Bytes.BYTES_COMPARATOR
+          .compare(((BytesWritable)entry.getKey()).getBytes(), regionInfo.getRegionName()) == 0) {
+        assertTrue(lastFlushTime <= ((LongWritable)entry.getValue()).get());
+      }
+    }
+
+    // tGetStartCode
+    assertEquals(client.getStartCode(), server.getStartCode());
+
+    // tGetStoreFileList
+    assertEquals(client.getStoreFileList(regionInfo.getRegionName(), FAMILY),
+        server.getStoreFileList(regionInfo.getRegionName(), FAMILY));
+
+    // tGetHLogsList
+    assertEquals(client.getHLogsList(false), server.getHLogsList(false));
+
+    // getRegionsAssignment
+    List<HRegionInfo> infos = thriftServer.getRegionsAssignment();
+    assertFalse(infos.isEmpty());
+  }
+
+  /**
+   * Test checkAndPut, checkAndDelete, incrementColumnValue through Thrift interface
+   */
+  @Test
+  public void testAtomicMutation() throws Exception {
+    HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);
+    ThriftHRegionInterface thriftServer = TEST_UTIL.getHBaseCluster().getThriftRegionServer(0);
+
+    byte[] row = Bytes.toBytes("test-row");
+    byte[] invalidValue = Bytes.toBytes("test-row2");
+    HRegionInfo regionInfo = table.getRegionLocation(row).getRegionInfo();
+    byte[] regionName = regionInfo.getRegionName();
+    Put put = new Put(row);
+    put.add(FAMILY, null, row);
+    assertTrue(thriftServer.checkAndPut(regionName, row, FAMILY, null, null, put));
+    assertFalse(thriftServer.checkAndPut(
+        regionInfo.getRegionName(), row, FAMILY, null, invalidValue, put));
+    Delete delete = new Delete(row);
+    delete.deleteFamily(FAMILY);
+    assertFalse(thriftServer.checkAndDelete(regionName, row, FAMILY, null, invalidValue, delete));
+    assertTrue(thriftServer.checkAndDelete(regionName, row, FAMILY, null, row, delete));
+
+    put = new Put(row);
+    put.add(FAMILY, null, Bytes.toBytes(1L));
+    thriftServer.put(regionName, put);
+    long result = thriftServer.incrementColumnValue(regionName, row, FAMILY, null, 100L, false);
+    assertEquals(101L, result);
+  }
+
+  /**
+   * Test setNumHDFSQuorumReadThreads, setHDFSQuorumReadTimeoutMillis through Thrift interface
+   */
+  @Test
+  public void testQuorumConfigurationChanges() throws Exception {
+    HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+    ThriftHRegionInterface thriftServer = TEST_UTIL.getHBaseCluster().getThriftRegionServer(0);
+
+    int threads = server.getQuorumReadThreadsMax() + 1;
+    long timeout = server.getQuorumReadTimeoutMillis() + 1;
+
+    thriftServer.setNumHDFSQuorumReadThreads(threads);
+    thriftServer.setHDFSQuorumReadTimeoutMillis(timeout);
+
+    assertEquals(threads, server.getQuorumReadThreadsMax());
+    assertEquals(timeout, server.getQuorumReadTimeoutMillis());
+  }
+
+  /**
+   * Test closeRegion through Thrift interface.
+   */
+  @Test
+  public void testCloseRegion() throws Exception {
+    HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+    ThriftHRegionInterface thriftServer = TEST_UTIL.getHBaseCluster().getThriftRegionServer(0);
+
+    HRegion[] region = server.getOnlineRegionsAsArray();
+    HRegionInfo regionInfo = region[0].getRegionInfo();
+
+    // Some initialization relevant to zk.
+    ZooKeeperWrapper zkWrapper = server.getZooKeeperWrapper();
+    String regionZNode = zkWrapper.getZNode(
+        zkWrapper.getRegionInTransitionZNode(), regionInfo.getEncodedName());
+
+    thriftServer.closeRegion(regionInfo, true);
+
+    byte[] data = zkWrapper.readZNode(regionZNode, new Stat());
+    RegionTransitionEventData rsData = new RegionTransitionEventData();
+    Writables.getWritable(data, rsData);
+
+    // Verify region is closed.
+    assertNull(server.getOnlineRegion(regionInfo.getRegionName()));
+    assertEquals(HBaseEventHandler.HBaseEventType.RS2ZK_REGION_CLOSED, rsData.getHbEvent());
+  }
+
+  /**
+   * Test stop, getStopReason, updateConfiguration through Thrift interface
+   * @throws Exception
+   */
+  @Test
+  public void testStopRegionServer() throws Exception {
+    ThriftHRegionInterface thriftServer = TEST_UTIL.getHBaseCluster().getThriftRegionServer(0);
+    thriftServer.updateConfiguration();
+
+    String why = "test reason";
+    thriftServer.stop(why);
+    assertEquals(why, thriftServer.getStopReason());
+  }
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestMasterToRSUseThrift.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestMasterToRSUseThrift.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestMasterToRSUseThrift.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestMasterToRSUseThrift.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,78 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift.swift;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * A simple test that brings up the MiniHBase cluster with both master ->
+ * regionserver and client -> regionserver communication going over Thrift,
+ * then does a simple put and get.
+ *
+ */
+public class TestMasterToRSUseThrift {
+  private final Configuration conf = HBaseConfiguration.create();
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
+  private final byte[] tableName = Bytes.toBytes("testMasterToRSUseThrift");
+  private final byte[] family = Bytes.toBytes("family");
+  private final byte[] row = Bytes.toBytes("row");
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMasterToRSUseThrift() throws IOException {
+    HTable table = TEST_UTIL.createTable(tableName, family);
+    Put put = new Put(row);
+    put.add(family, null, row);
+    table.put(put);
+    table.flushCommits();
+    Result r = table.get(new Get(row));
+    assertTrue(Bytes.equals(r.getValue(family, null), row));
+  }
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServeRPCAndThriftConcurrently.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServeRPCAndThriftConcurrently.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServeRPCAndThriftConcurrently.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServeRPCAndThriftConcurrently.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,109 @@
+/**
+ * Copyright 2013 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift.swift;
+
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+
+/**
+ * Verify that we are able to serve Thrift and RPC requests in parallel. This
+ * is to ensure that we can serve Hadoop RPC requests while we are transitioning
+ * to Thrift.
+ */
+public class TestServeRPCAndThriftConcurrently extends TestCase {
+  protected final static Log LOG =
+    LogFactory.getLog(TestServeRPCAndThriftConcurrently.class);
+
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final int SLAVES = 1;
+  static final byte[] TABLE = Bytes.toBytes("testTable");
+  static final byte[] FAMILY = Bytes.toBytes("family");
+  static final byte[][] FAMILIES = new byte[][] { FAMILY };
+  private Configuration conf;
+
+  /**
+   * Create two clients, one which talks Thrift, the other which talks
+   * Hadoop RPC. Let one of them do puts, the other will do gets to verify
+   * those puts.
+   */
+  public void testSimpleGetsAndPutsInParallel(boolean writeThriftPort)
+    throws IOException, InterruptedException {
+    conf = TEST_UTIL.getConfiguration();
+    conf.setBoolean(HConstants.REGIONSERVER_USE_HADOOP_RPC, true);
+    conf.setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT, true);
+    conf.setBoolean(
+      HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, writeThriftPort);
+    conf.setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+      false);
+    TEST_UTIL.startMiniCluster(SLAVES);
+
+    HTable thriftClient = TEST_UTIL.createTable(TABLE, FAMILIES);
+
+    conf.setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT, false);
+    HTable rpcClient = new HTable(conf, TABLE);
+
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("r1");
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    thriftClient.put(put);
+
+    Result result;
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    result = rpcClient.get(g);
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), value));
+
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Test if we can run both of them if the Thrift port was written in meta.
+   *
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public void testConcurrentWorkingWithThriftPortInMeta()
+    throws IOException, InterruptedException {
+    testSimpleGetsAndPutsInParallel(true);
+  }
+
+  /**
+   * Test if we can run both of them if the Hadoop port was written in meta.
+   *
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public void testConcurrentWorkingWithHadoopPortInMeta()
+    throws IOException, InterruptedException {
+    testSimpleGetsAndPutsInParallel(false);
+  }
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServerSideException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServerSideException.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServerSideException.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServerSideException.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,227 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift.swift;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableAsync;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.ipc.ThriftHRegionInterface;
+import org.apache.hadoop.hbase.regionserver.FailureInjectingThriftHRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionOverloadedException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test failure handling in HTableAsync.
+ */
+public class TestServerSideException {
+  private static final Log LOG = LogFactory.getLog(TestServerSideException.class);
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final int REGION_SERVERS = 2;
+
+  static final byte[] TABLE = Bytes.toBytes("testTable");
+  static final byte[] FAMILY = Bytes.toBytes("testFamily");
+  static final byte[] ROW = Bytes.toBytes("testRow");
+  static final byte[] VALUE = Bytes.toBytes("testValue");
+
+  private static HTableAsync table = null;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    conf.setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    conf.setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
+
+    conf.setClass(HConstants.THRIFT_REGION_SERVER_IMPL,
+        FailureInjectingThriftHRegionServer.class, ThriftHRegionInterface.class);
+
+    conf.setInt(HConstants.CLIENT_RETRY_NUM_STRING, 5);
+    // Server will allow client to retry once when there is RegionOverloadedException
+    conf.setInt(HConstants.SERVER_REQUESTED_RETRIES_STRING, 1);
+    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 200);
+
+    TEST_UTIL.startMiniCluster(REGION_SERVERS);
+
+    table = TEST_UTIL.createTable(TABLE, FAMILY);
+
+    Put put = new Put(ROW);
+    put.add(FAMILY, null, VALUE);
+    table.put(put);
+    table.flushCommits();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Test if HTableAsync behaves correctly when the server throws IOException.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRetriesExhaustedException() throws Exception {
+    // When the server keeps throwing IOExceptions, we should see a RetriesExhaustedException
+    FailureInjectingThriftHRegionServer.setFailureMode(
+        FailureInjectingThriftHRegionServer.FailureType.MIXEDRETRIABLEEXCEPTIONS, Integer.MAX_VALUE);
+
+    Get get = new Get.Builder(ROW).addFamily(FAMILY).create();
+    ListenableFuture<Result> future = table.getAsync(get);
+    boolean hasIOE = false;
+    try {
+      future.get();
+    } catch (ExecutionException e) {
+      LOG.debug("Got exception", e);
+      if (e.getCause() instanceof RetriesExhaustedException) {
+        hasIOE = true;
+      }
+    }
+    assertTrue(hasIOE);
+  }
+
+  /**
+   * Test if HTableAsync behaves correctly when server requests it to wait.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRegionOverloadedExceptions() throws Exception {
+    // Expect the client to retry once and succeed on the second attempt
+    FailureInjectingThriftHRegionServer.setFailureMode(
+        FailureInjectingThriftHRegionServer.FailureType.REGIONOVERLOADEDEXCEPTION, 1);
+
+    Get get = new Get.Builder(ROW).addFamily(FAMILY).create();
+    ListenableFuture<Result> future = table.getAsync(get);
+    Result result = future.get();
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), VALUE));
+
+    // Expect the client to retry once and fail on the second attempt.
+    // In the meantime, it should sleep at least as long as the server requested.
+    FailureInjectingThriftHRegionServer.setFailureMode(
+        FailureInjectingThriftHRegionServer.FailureType.REGIONOVERLOADEDEXCEPTION, 2);
+
+    future = table.getAsync(get);
+    long futureStartTime = System.currentTimeMillis();
+    boolean hasROE = false;
+    try {
+      future.get();
+    } catch (ExecutionException e) {
+      LOG.debug("Got exception", e);
+      if (e.getCause() instanceof RegionOverloadedException) {
+        long futureFinishTime = System.currentTimeMillis();
+        // The default server-requested pause is 1000ms, larger than the 200ms
+        // configured for normal retries, so we know the client honors the
+        // server's instruction.
+        assertTrue(futureFinishTime - futureStartTime > HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
+        hasROE = true;
+      }
+    }
+    assertTrue(hasROE);
+  }
+
+  /**
+   * Test if HTableAsync is able to retry on different types of exceptions.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRetriableExceptions() throws Exception {
+    // There are 5 retries. First 4 should fail and the last one should succeed.
+    FailureInjectingThriftHRegionServer.setFailureMode(
+        FailureInjectingThriftHRegionServer.FailureType.MIXEDRETRIABLEEXCEPTIONS, 4);
+
+    Get get = new Get.Builder(ROW).addFamily(FAMILY).create();
+    ListenableFuture<Result> future = table.getAsync(get);
+    Result result = future.get();
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), VALUE));
+  }
+
+  /**
+   * Test that HTableAsync fails immediately on non-retriable exceptions, as expected.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testNonRetriableExceptions() throws Exception {
+    FailureInjectingThriftHRegionServer.setFailureMode(
+        FailureInjectingThriftHRegionServer.FailureType.DONOTRETRYEXCEPTION, 1);
+    Get get = new Get.Builder(ROW).addFamily(FAMILY).create();
+    ListenableFuture<Result> future = table.getAsync(get);
+    boolean hasDoNotRetryIOE = false;
+    try {
+      future.get();
+    } catch (ExecutionException e) {
+      LOG.debug("Got exception", e);
+      if (e.getCause() instanceof DoNotRetryIOException) {
+        hasDoNotRetryIOE = true;
+      }
+    }
+    assertTrue(hasDoNotRetryIOE);
+  }
+
+  /**
+   * Test if HTableAsync is able to retry on network failure (TTransportException).
+   *
+   * WARNING: This test changes the number of live region servers. Please add
+   * new tests above it to avoid unnecessary debugging.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testTTransportException() throws Exception {
+    HTableAsync table = TEST_UTIL.createTable(
+        Bytes.toBytes("testTable2"), FAMILY);
+
+    int numRegions = TEST_UTIL.createMultiRegions(table, FAMILY);
+    TEST_UTIL.waitUntilAllRegionsAssigned(numRegions);
+
+    Put put = new Put(ROW);
+    put.add(FAMILY, null, VALUE);
+    table.put(put);
+    table.flushCommits();
+
+    FailureInjectingThriftHRegionServer.setFailureMode(
+        FailureInjectingThriftHRegionServer.FailureType.STOP, 1);
+
+    Get get = new Get.Builder(ROW).addFamily(FAMILY).create();
+    ListenableFuture<Result> future = table.getAsync(get);
+    Result result = future.get();
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), VALUE));
+  }
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleOperations.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleOperations.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleOperations.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleOperations.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,621 @@
+package org.apache.hadoop.hbase.thrift.swift;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NavigableMap;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableAsyncInterface;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestSimpleOperations {
+  private final Log LOG = LogFactory.getLog(TestSimpleOperations.class);
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final int SLAVES = 1;
+  static final String ROW_PREFIX = "row";
+  static final byte[] TABLE = Bytes.toBytes("testTable");
+  static final byte[] FAMILY = Bytes.toBytes("family");
+  static final byte[][] FAMILIES = new byte[][] { FAMILY };
+  static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
+  static final String VALUE_PREFIX = "value";
+  static final byte[] FAMILY1 = Bytes.toBytes("family1");
+  static final byte[] FAMILY2 = Bytes.toBytes("family2");
+  static final byte[][] FAMILIES1 = new byte[][] { FAMILY1, FAMILY2 };
+  static final byte[] QUALIFIER1 = Bytes.toBytes("q1");
+  static final byte[] QUALIFIER2 = Bytes.toBytes("q2");
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
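+    // The flag names suggest: the first publishes each region server's thrift
+    // endpoint in META so clients can locate it; the other two switch the
+    // client->RS and master->RS paths from Hadoop RPC to Thrift.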
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Test that a simple put using Hadoop RPC, followed by a simple get on
+   * that row (using Thrift this time), works.
+   * @throws IOException
+   */
+  @Test
+  public void testSimpleGetUsingThrift() throws IOException {
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    ht.put(put);
+
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    Result result1 = ht.get(g);
+
+    LOG.debug("Found the result : " + result1);
+    assertTrue(Bytes.equals(result1.getValue(FAMILY, null), value));
+  }
+
+  /**
+   * Test {@link HRegionInterface#put(byte[], List)}
+   *
+   * Do a batch mutate with 100 puts inside, then do a get for each of the
+   * puts to verify it landed.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testPutMultiViaThrift() throws IOException {
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    List<Mutation> mutations = new ArrayList<>();
+    for (int i = 0; i < 100; i++) {
+      byte[] r = Bytes.toBytes("r" + i);
+      Put put = new Put(r);
+      byte[] value = Bytes.toBytes("test-value" + i);
+      put.add(FAMILY, null, value);
+      mutations.add(put);
+    }
+
+    ht.batchMutate(mutations);
+    for (int i = 0; i < 100; i++) {
+      Get g = new Get.Builder(Bytes.toBytes("r" + i)).addFamily(FAMILY).create();
+      Result result1 = ht.get(g);
+      assertTrue(Bytes.equals(result1.getValue(FAMILY, null), Bytes.toBytes("test-value"+i)));
+    }
+  }
+
+  /**
+   * Test that a simple put, followed by a simple asynchronous get on that
+   * row (using Thrift), works.
+   * @throws Exception
+   */
+  @Test
+  public void testSimpleAsynchronousGet() throws Exception {
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+
+    HTableAsyncInterface ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    ht.put(put);
+
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    ListenableFuture<Result> future = ht.getAsync(g);
+    Result result1 = future.get();
+
+    assertTrue(Bytes.equals(result1.getValue(FAMILY, null), value));
+  }
+
+  /**
+   * Test that a simple put using Thrift, followed by a simple get on that
+   * row (using Hadoop RPC this time), works.
+   * @throws IOException
+   */
+  @Test
+  public void testSimplePutUsingThrift() throws IOException {
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+    Result result;
+
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    ht.put(put);
+
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    result = ht.get(g);
+
+    LOG.debug("Found the result : " + result);
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), value));
+  }
+
+  /**
+   * Verify that multiputs make it through end-to-end when sent via Thrift.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testMultiPutUsingThrift() throws Exception {
+    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES);
+    List<Put> puts = createDummyPuts(10);
+    // The list of puts is batched as a MultiPut and sent across.
+    table.put(puts);
+
+    // Now verify that all the puts made it through, using Hadoop RPC.
+    verifyDummyMultiPut(table, 10);
+  }
+
+  List<Put> createDummyPuts(int n) {
+    List<Put> putList = new ArrayList<Put>();
+    for (int i = 0; i < n; i++) {
+      Put p = new Put(Bytes.toBytes(ROW_PREFIX + i),
+        System.currentTimeMillis());
+      p.add(FAMILY, QUALIFIER, Bytes.toBytes(VALUE_PREFIX + i));
+      putList.add(p);
+    }
+    return putList;
+  }
+
+  void verifyDummyMultiPut(HTable table, int n)
+    throws IOException {
+    for (int i = 0; i < n; i++) {
+      Get g = new Get(Bytes.toBytes(ROW_PREFIX + i));
+      Result r = table.get(g);
+      NavigableMap<byte[], byte[]> familyMap = r.getFamilyMap(FAMILY);
+      assertTrue(Arrays.equals(familyMap.get(QUALIFIER),
+        Bytes.toBytes(VALUE_PREFIX + i)));
+    }
+  }
+
+  /**
+   * Verify that we can do a Delete operation using Thrift. We achieve this
+   * by doing a Put, followed by a Get to verify that it went through. Then
+   * we do a Delete followed by a Get to verify that the Delete went through.
+   *
+   * Only the Delete is done using Thrift.
+   * @throws Exception
+   */
+  @Test
+  public void testDeleteUsingThrift() throws Exception {
+    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES);
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+    Result result;
+
+    // Do a Put
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    table.put(put);
+
+    // Do a Get to check it went through
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    result = table.get(g);
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), value));
+
+    // Now do a Delete using Thrift.
+    Delete del = new Delete(r1);
+    table.delete(del);
+
+    // Now do the same get again and check that the delete works.
+    result = table.get(g);
+    assertTrue(result.isEmpty());
+  }
+
+  /**
+   * Verify that we can do a Delete operation using asynchronous Delete. We achieve
+   * this by doing a Put, followed by a Get to verify that it went through. Then
+   * we do a Delete followed by a Get to verify that the Delete went through.
+   *
+   * Only the Delete is done using Thrift.
+   * @throws Exception
+   */
+  @Test
+  public void testSimpleAsynchronousDelete() throws Exception {
+    HTableAsyncInterface table = TEST_UTIL.createTable(TABLE, FAMILIES);
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+    Result result;
+
+    // Do a Put
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    table.put(put);
+
+    // Do a Get to check it went through
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    result = table.get(g);
+    assertTrue(Bytes.equals(result.getValue(FAMILY, null), value));
+
+    // Now do a Delete using Thrift.
+    Delete del = new Delete(r1);
+    ListenableFuture<Void> future = table.deleteAsync(del);
+    future.get();
+
+    // Now do the same get again and check that the delete works.
+    result = table.get(g);
+    assertTrue(result.isEmpty());
+  }
+
+  /**
+   * Check that the Thrift-annotated Delete supports everything the
+   * earlier Delete did.
+   * @throws Exception
+   */
+  @Test
+  public void testDeleteThriftAnnotationIsSufficient() throws Exception {
+    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES1);
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] r2 = Bytes.toBytes("r2");
+    byte[] v1 = Bytes.toBytes("v1");
+    byte[] v2 = Bytes.toBytes("v2");
+
+    // Do a Put
+    Put put1 = new Put(r1);
+    // Adding two KVs, <FAMILY1:QUALIFIER1, v1> and <FAMILY2:QUALIFIER2, v2>
+    put1.add(FAMILY1, QUALIFIER1, v1);
+    put1.add(FAMILY2, QUALIFIER2, v2);
+    table.put(put1);
+
+    // Now do a Delete using Thrift.
+    Delete del1 = new Delete(r1);
+    // We delete only FAMILY2. The first KV should survive.
+    del1.deleteFamily(FAMILY2);
+    // Use thrift while doing this.
+    table.delete(del1);
+
+    Get g1 = new Get(r1);
+    Result res1 = table.get(g1);
+    // We should get only one Key Value, since we deleted the other CF.
+    assertEquals(1, res1.list().size());
+
+    // Make sure that we got the right KV: <FAMILY1:QUALIFIER1, v1>
+    KeyValue kv1 = res1.list().get(0);
+    assertTrue(Bytes.equals(kv1.getFamily(), FAMILY1));
+    assertTrue(Bytes.equals(kv1.getQualifier(), QUALIFIER1));
+    assertTrue(Bytes.equals(kv1.getValue(), v1));
+
+    Put put2 = new Put(r2);
+    put2.add(FAMILY1, QUALIFIER1, v1);
+    put2.add(FAMILY1, QUALIFIER2, v2);
+    table.put(put2);
+
+    Delete del2 = new Delete(r2);
+    // Now try if we can delete columns.
+    del2.deleteColumn(FAMILY1, QUALIFIER1);
+    // Use thrift to do this.
+    table.delete(del2);
+
+    Get g2 = new Get(r2);
+    // Do a standard get.
+    Result res2 = table.get(g2);
+
+    // Since we deleted <FAMILY1:QUALIFIER1, v1>, we expect only
+    // <FAMILY1:QUALIFIER2, v2>
+    KeyValue kv2 = res2.list().get(0);
+    // Check everything is the same.
+    assertTrue(Bytes.equals(kv2.getFamily(), FAMILY1));
+    assertTrue(Bytes.equals(kv2.getQualifier(), QUALIFIER2));
+    assertTrue(Bytes.equals(kv2.getValue(), v2));
+  }
+
+  /**
+   * Test if we can do a batch of deletes using Thrift. Do this by making a
+   * bunch of puts and gets (to verify that the puts went through), and then
+   * doing a batch of deletes for the puts, followed by gets (to verify that
+   * the deletes went through).
+   *
+   * Only the Deletes use Thrift.
+   * @throws IOException
+   */
+  @Test
+  public void testBatchOfDeletesUsingThrift() throws IOException {
+    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES);
+    Result result;
+    final int numPuts = 10;
+
+    // Do a bunch of puts
+    for (int i = 0; i < numPuts; i++) {
+      Put put = new Put(Bytes.toBytes(ROW_PREFIX + i));
+      put.add(FAMILY, null, Bytes.toBytes(VALUE_PREFIX + i));
+      table.put(put);
+    }
+
+    // Do gets to verify that they happened
+    for (int i = 0; i < numPuts; i++) {
+      byte[] r = Bytes.toBytes(ROW_PREFIX + i);
+      Get g = new Get.Builder(r).addFamily(FAMILY).create();
+      result = table.get(g);
+      assertTrue(Bytes.equals(result.getValue(FAMILY, null),
+        Bytes.toBytes(VALUE_PREFIX + i)));
+    }
+
+    List<Delete> deletes = new LinkedList<Delete>();
+    for (int i = 0; i < numPuts; i++) {
+      deletes.add(new Delete(Bytes.toBytes(ROW_PREFIX + i)));
+    }
+    table.delete(deletes);
+
+    for (int i = 0; i < numPuts; i++) {
+      byte[] r = Bytes.toBytes(ROW_PREFIX + i);
+      Get g = new Get.Builder(r).addFamily(FAMILY).create();
+      result = table.get(g);
+      assertTrue(result.isEmpty());
+    }
+  }
+
+  /**
+   * Check that the Thrift-annotated Put supports everything the
+   * earlier Put did.
+   * @throws Exception
+   */
+  @Test
+  public void testPutThriftAnnotationIsSufficient() throws Exception {
+    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES1);
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] r2 = Bytes.toBytes("r2");
+    byte[] v1 = Bytes.toBytes("v1");
+    byte[] v2 = Bytes.toBytes("v2");
+
+    // Do a Put using Thrift
+    Put put1 = new Put(r1);
+    // Adding two KVs, <FAMILY1:QUALIFIER1, v1> and <FAMILY2:QUALIFIER2, v2>
+    put1.add(FAMILY1, QUALIFIER1, v1);
+    put1.add(FAMILY2, QUALIFIER2, v2);
+    table.put(put1);
+
+    // Now do a Get using Hadoop RPC
+    Get get1 = new Get(r1);
+    Result result1 = table.get(get1);
+    assertEquals(2, result1.list().size());
+    KeyValue kv1 = result1.list().get(0);
+    assertTrue(Bytes.equals(FAMILY1, kv1.getFamily()));
+    assertTrue(Bytes.equals(QUALIFIER1, kv1.getQualifier()));
+    assertTrue(Bytes.equals(v1, kv1.getValue()));
+
+    KeyValue kv2 = result1.list().get(1);
+    assertTrue(Bytes.equals(FAMILY2, kv2.getFamily()));
+    assertTrue(Bytes.equals(QUALIFIER2, kv2.getQualifier()));
+    assertTrue(Bytes.equals(v2, kv2.getValue()));
+
+    // Do another put using Thrift, and using a different setter method.
+    Put put2 = new Put(r2);
+    KeyValue kv3 = new KeyValue(r2, FAMILY2, QUALIFIER1, v2);
+    put2.add(kv3);
+    table.put(put2);
+
+    // Now do a Get using Hadoop RPC
+    Get get2 = new Get(r2);
+    Result result2 = table.get(get2);
+    assertEquals(1, result2.list().size());
+    KeyValue kv4 = result2.list().get(0);
+    assertTrue(Bytes.equals(FAMILY2, kv4.getFamily()));
+    assertTrue(Bytes.equals(QUALIFIER1, kv4.getQualifier()));
+    assertTrue(Bytes.equals(v2, kv4.getValue()));
+  }
+
+  /**
+   * Test getRowOrBefore: a lookup for "bad" should return the closest
+   * preceding row, "ba".
+   * @throws IOException
+   */
+  @Test
+  public void testGetRowOrBefore() throws IOException {
+    HTable table = TEST_UTIL.createTable(Bytes.toBytes("testGetRowOrBefore"),
+        FAMILY);
+    byte[] value = Bytes.toBytes("value");
+    Put p = new Put(Bytes.toBytes("bb"));
+    p.add(FAMILY, null, value);
+    table.put(p);
+    p = new Put(Bytes.toBytes("ba"));
+    p.add(FAMILY, null, value);
+    table.put(p);
+    table.flushCommits();
+
+    Result r = table.getRowOrBefore(Bytes.toBytes("bad"), FAMILY);
+    assertTrue(Bytes.equals(Bytes.toBytes("ba"), r.getRow()));
+  }
+
+  /**
+   * Test getRowOrBeforeAsync
+   * @throws Exception
+   */
+  @Test
+  public void testGetRowOrBeforeAsync() throws Exception {
+    HTableAsyncInterface table = TEST_UTIL.createTable(Bytes.toBytes("testGetRowOrBefore"),
+        FAMILY);
+    byte[] value = Bytes.toBytes("value");
+    Put p = new Put(Bytes.toBytes("bb"));
+    p.add(FAMILY, null, value);
+    table.put(p);
+    p = new Put(Bytes.toBytes("ba"));
+    p.add(FAMILY, null, value);
+    table.put(p);
+    table.flushCommits();
+
+    Result r = table.getRowOrBeforeAsync(Bytes.toBytes("bad"), FAMILY).get();
+    assertTrue(Bytes.equals(Bytes.toBytes("ba"), r.getRow()));
+  }
+
+  /**
+   * Test if doing batchGet works via Thrift.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSimpleMultiActionUsingThrift() throws IOException {
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+    Result[] result;
+
+    // do a put via Thrift
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    ht.put(put);
+
+    // do a batch get via Thrift
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    List<Get> gets = new ArrayList<Get>();
+    gets.add(g);
+    result = ht.batchGet(gets);
+    LOG.debug("Found the result : " + result);
+    assertTrue(result.length == 1);
+    assertTrue(Bytes.equals(result[0].getValue(FAMILY, null), value));
+  }
+
+  /**
+   * Test if doing batchGet works via Hadoop RPC.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSimpleMultiActionUsingHadoopRpc() throws IOException {
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+    Result[] result;
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+
+    // do a put via Hadoop RPC
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    ht.put(put);
+
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    List<Get> gets = new ArrayList<Get>();
+    gets.add(g);
+    // do a batch get via hadoop rpc
+    result = ht.batchGet(gets);
+    LOG.debug("Found the result : " + result);
+    assertTrue(result.length == 1);
+    assertTrue(Bytes.equals(result[0].getValue(FAMILY, null), value));
+  }
+
+  /**
+   * Test asynchronous put and batch get.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testBatchGetAsyncAction() throws Exception {
+    byte[] r1 = Bytes.toBytes("r1");
+    byte[] value = Bytes.toBytes("test-value");
+    Result[] result;
+
+    // do an asynchronous put
+    HTableAsyncInterface ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    Put put = new Put(r1);
+    put.add(FAMILY, null, value);
+    ht.put(put);
+
+    // do a flush just for fun
+    ht.flushCommitsAsync().get();
+
+    // do an asynchronous batch get
+    Get g = new Get.Builder(r1).addFamily(FAMILY).create();
+    List<Get> gets = new ArrayList<>();
+    gets.add(g);
+    ListenableFuture<Result[]> future = ht.batchGetAsync(gets);
+    result = future.get();
+    LOG.debug("Found the result : " + result);
+    assertTrue(result.length == 1);
+    assertTrue(Bytes.equals(result[0].getValue(FAMILY, null), value));
+  }
+
+  /**
+   * Do a batch mutate with 100 puts inside, then do a get for each of the
+   * puts. Finally delete 90 rows and verify the scanner sees the remaining 10.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testMultiAsyncActions() throws Exception {
+    HTableAsyncInterface ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    List<Mutation> mutations = new ArrayList<>();
+    for (int i = 0; i < 100; i++) {
+      byte[] r = Bytes.toBytes("r" + i);
+      Put put = new Put(r);
+      byte[] value = Bytes.toBytes("test-value" + i);
+      put.add(FAMILY, null, value);
+      mutations.add(put);
+    }
+
+    ht.batchMutateAsync(mutations).get();
+    List<ListenableFuture<Result>> getFutures = new ArrayList<>();
+    for (int i = 0; i < 100; i++) {
+      Get g = new Get.Builder(Bytes.toBytes("r" + i)).addFamily(FAMILY).create();
+      getFutures.add(ht.getAsync(g));
+    }
+    for (int i = 0; i < 100; i++) {
+      Result result = getFutures.get(i).get();
+      assertTrue(Bytes.equals(result.getValue(FAMILY, null), Bytes.toBytes("test-value"+i)));
+    }
+
+    ArrayList<Mutation> deletes = new ArrayList<>();
+    for (int i = 0; i < 90; i++) {
+      Delete delete = new Delete(Bytes.toBytes("r" + i));
+      delete.deleteFamily(FAMILY);
+      deletes.add(delete);
+    }
+    ht.batchMutateAsync(deletes).get();
+    Scan scan = new Scan.Builder().addFamily(FAMILY).create();
+    ResultScanner scanner = ht.getScanner(scan);
+    int numRows = 0;
+    while (scanner.next() != null) {
+      ++numRows;
+    }
+    assertEquals(10, numRows);
+  }
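+
+  /**
+   * A minimal sketch (the helper name is illustrative) of joining the per-row
+   * futures above in one step via Guava's Futures.allAsList instead of
+   * looping over get(); assumes Guava is on the classpath, which the
+   * ListenableFuture import already implies.
+   */
+  private static Result[] joinGets(List<ListenableFuture<Result>> futures)
+      throws Exception {
+    List<Result> results =
+        com.google.common.util.concurrent.Futures.allAsList(futures).get();
+    return results.toArray(new Result[results.size()]);
+  }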
+
+  /**
+   * Lock a row and unlock it.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRowLock() throws Exception {
+    HTableAsyncInterface ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+    byte[] row = Bytes.toBytes("test-row");
+    Put put = new Put(row);
+    put.add(FAMILY, null, row);
+    ht.put(put);
+    ht.flushCommitsAsync().get();
+
+    RowLock lock = ht.lockRowAsync(row).get();
+    assertTrue(lock.getLockId() > 0);
+    assertTrue(Bytes.equals(lock.getRow(), row));
+
+    ht.unlockRowAsync(lock).get();
+  }
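+
+  /**
+   * A minimal sketch (illustrative, using only calls exercised in this class)
+   * of the lock-safely pattern implied by the test above: release the row
+   * lock in a finally block so a failed mutation cannot leak it.
+   */
+  private static void putUnderRowLock(HTableAsyncInterface ht, byte[] row,
+      Put put) throws Exception {
+    RowLock lock = ht.lockRowAsync(row).get();
+    try {
+      ht.put(put);                     // mutate while holding the lock
+      ht.flushCommitsAsync().get();
+    } finally {
+      ht.unlockRowAsync(lock).get();   // always release, even on failure
+    }
+  }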
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleRowMutations.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleRowMutations.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleRowMutations.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleRowMutations.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,168 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift.swift;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableAsyncInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSimpleRowMutations {
+  byte[] tableName = Bytes.toBytes("testSimpleRowMutationsUsingSwift");
+  byte[] family1 = Bytes.toBytes("family1");
+  byte[] family2 = Bytes.toBytes("family2");
+  byte[] qual = Bytes.toBytes("qual");
+  byte[] row = Bytes.toBytes("rowkey");
+
+  protected final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final int SLAVES = 1;
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testRowMutationsBuilder() throws IOException {
+    HTable table =
+        TEST_UTIL.createTable(tableName, new byte[][] {family1, family2});
+    RowMutations b = new RowMutations(row);
+    Put dp = new Put(row);
+    dp.add(family2, qual, family1);
+    table.put(dp);
+
+    Put p = new Put(row);
+    p.add(family1, qual, family1);
+    b.add(p);
+
+    Delete d = new Delete(row);
+    d.deleteFamily(family2);
+    b.add(d);
+
+    table.mutateRow(b);
+
+    Get g = new Get(row);
+    g.addColumn(family1, qual);
+    g.addColumn(family2, qual);
+
+    Result r = table.get(g);
+    assertNotNull(r.getValue(family1, qual));
+    assertEquals(
+        Bytes.BYTES_COMPARATOR.compare(r.getValue(family1, qual), family1), 0);
+    assertNull(r.getValue(family2, qual));
+
+    int num = 10;
+    List<RowMutations> mutations = new ArrayList<RowMutations>();
+    byte[] qualifier = Bytes.toBytes("qualifier");
+    for (int i = 0; i < num; i++) {
+      byte[] rowi = Bytes.toBytes("row" + i);
+      Put pi = new Put(rowi);
+      byte[] valuei = Bytes.toBytes("value" + i);
+      pi.add(family1, qualifier, valuei);
+      RowMutations mut = new RowMutations.Builder(rowi).add(pi).create();
+      mutations.add(mut);
+    }
+    table.mutateRow(mutations);
+    HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+    HRegionInfo info =
+        table.getRegionLocation(Bytes.toBytes("row")).getRegionInfo();
+    for (int i = 0; i < num; i++) {
+      byte[] rowi = Bytes.toBytes("row" + i);
+      Get gi = new Get(rowi);
+      byte[] expectedValue = Bytes.toBytes("value" + i);
+      byte[] actualValue = server.get(info.getRegionName(),
+          gi).getValue(family1, qualifier);
+      System.out.println("expectedValue : " + Bytes.toString(expectedValue) +
+          " actualValue : " + Bytes.toString(actualValue));
+      assertTrue(Bytes.equals(expectedValue, actualValue));
+    }
+  }
+
+  /**
+   * Test asynchronous row mutation.
+   * @throws IOException
+   * @throws ExecutionException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testAsynchronousRowMutation()
+      throws IOException, ExecutionException, InterruptedException {
+    HTableAsyncInterface table =
+        TEST_UTIL.createTable(tableName, new byte[][] {family1, family2});
+    RowMutations.Builder b = new RowMutations.Builder(row);
+    Put dp = new Put(row);
+    dp.add(family2, qual, family1);
+    table.put(dp);
+
+    Put p = new Put(row);
+    p.add(family1, qual, family1);
+    b = b.add(p);
+
+    Delete d = new Delete(row);
+    d.deleteColumn(family2, qual);
+    b.add(d);
+
+    RowMutations arm = b.create();
+    table.mutateRowAsync(arm).get();
+
+    Get g = new Get(row);
+    g.addColumn(family1, qual);
+    g.addColumn(family2, qual);
+
+    Result r = table.get(g);
+    assertEquals(
+        Bytes.BYTES_COMPARATOR.compare(r.getValue(family1, qual), family1), 0);
+    assertNull(r.getValue(family2, qual));
+  }
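+
+  /**
+   * A minimal sketch (the helper name is illustrative) of assembling the same
+   * put-then-delete RowMutations with the Builder used above, kept to the
+   * non-chained calls the tests already exercise.
+   */
+  private RowMutations putThenDelete(Put p, Delete d) throws IOException {
+    RowMutations.Builder builder = new RowMutations.Builder(row);
+    builder.add(p);   // apply the put first...
+    builder.add(d);   // ...then the delete, atomically on the same row
+    return builder.create();
+  }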
+}