Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC

svn commit: r1576909 [18/18] - in /hbase/branches/0.89-fb/src: ./ examples/thrift/ main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/avro/ main/java/org/apache/hadoop/hbase/avro/generated/ main/java/org/apache/hadoop/hbase/client/ ma...

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleScan.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleScan.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleScan.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleScan.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,163 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift.swift;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableAsync;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSimpleScan {
+  private final Log LOG = LogFactory.getLog(TestSimpleScan.class);
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final int SLAVES = 1;
+  byte[] tableName = Bytes.toBytes("testSimpleGetUsingSwift");
+  byte[] family1 = Bytes.toBytes("family1");
+  byte[] family2 = Bytes.toBytes("family2");
+  byte[][] families = new byte[][] { family1, family2 };
+  String rowPrefix1 = "r-f1-";
+  String rowPrefix2 = "r-f2-";
+  String valuePrefix1 = "val-f1-";
+  String valuePrefix2 = "val-f2-";
+  String rowFormat = "%s%03d";
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Tests the use of swift to perform a scan.
+   * @throws IOException
+   */
+  @Test
+  public void testSimpleScanUsingSwift() throws IOException {
+
+    HTable ht = TEST_UTIL.createTable(tableName, families);
+    // Inserting some rows into family1.
+    // The row keys look like "r-f1-020".
+    insertRows(ht, 100, family1, rowPrefix1, valuePrefix1);
+    // Flushing the data to disk.
+    TEST_UTIL.flush(tableName);
+    // Inserting some rows into family2; the row keys look like "r-f2-020".
+    insertRows(ht, 100, family2, rowPrefix2, valuePrefix2);
+
+    Scan s = new Scan.Builder().addColumn(family1, null).setCaching(10)
+        .setStartRow(Bytes.toBytes(String.format(rowFormat, rowPrefix1, 20))).create();
+
+    ResultScanner scanner = ht.getScanner(s);
+    int rowNumber = 19;
+
+    // Given the start row, we expect the scan to go through r-f1-020,
+    // r-f1-021, ..., r-f1-100.
+    for (int i = 0; i < 100; i++) {
+      Result r = scanner.next();
+      LOG.debug(r);
+      if (r == null) {
+        // the scan should stop after we scan the row r-f1-100
+        assertTrue(rowNumber >= 100);
+        break;
+      }
+      rowNumber++;
+      assertTrue(Bytes.BYTES_COMPARATOR.compare(r.getRow(),
+          Bytes.toBytes(String.format(rowFormat, rowPrefix1, rowNumber))) == 0);
+      assertTrue(Bytes.BYTES_COMPARATOR.compare(r.getValue(family1, null),
+          Bytes.toBytes(String.format(rowFormat, valuePrefix1, rowNumber))) == 0);
+    }
+  }
+
+  /**
+   * Inserts rows into the table. The row keys look like &lt;rowPrefix&gt;001.
+   * @param table
+   * @param numberOfRows
+   * @param family
+   * @param rowPrefix
+   * @param valuePrefix
+   * @throws IOException
+   */
+  private void insertRows(HTable table, int numberOfRows, byte[] family,
+      String rowPrefix, String valuePrefix) throws IOException {
+    for (int i = 1; i <= numberOfRows; i++) {
+      Put put = new Put(Bytes.toBytes(String.format(rowFormat, rowPrefix, i)));
+      put.add(family, null, Bytes.toBytes(String.format(rowFormat, valuePrefix, i)));
+      table.put(put);
+    }
+  }
+
+  /**
+   * Tests that the results of scanning .META. contain the thrift region info.
+   */
+  @Test
+  public void testScanResultsTRegionInfo() throws Exception {
+    TEST_UTIL.createTable(tableName, families).close();
+    TEST_UTIL.flush();
+
+    HTable ht = new HTableAsync(TEST_UTIL.getConfiguration(),
+        HConstants.META_TABLE_NAME);
+
+    Scan s = new Scan.Builder().addFamily(HConstants.CATALOG_FAMILY)
+        .create();
+
+    int count = 0;
+    ResultScanner scanner = ht.getScanner(s);
+    while (true) {
+      Result r = scanner.next();
+      if (r == null) {
+        break;
+      }
+      LOG.debug(r);
+      count++;
+      byte[] ri = r.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.REGIONINFO_QUALIFIER);
+      Assert.assertNotNull("REGIONINFO_QUALIFIER not exists", ri);
+      byte[] tri = r.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.THRIFT_REGIONINFO_QUALIFIER);
+      Assert.assertNotNull("THRIFT_REGIONINFO_QUALIFIER not exists", tri);
+    }
+    Assert.assertEquals("Number of rows", 1, count);
+  }
+}
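
The test above exercises the 0.89-fb Scan.Builder together with a start row and client-side caching. A minimal sketch of the same pattern, with the scanner closed when done (the test omits the close; the table handle and the column names here are placeholders, not part of the commit):

  static void scanFrom(HTable table) throws java.io.IOException {
    Scan scan = new Scan.Builder()
        .addColumn(Bytes.toBytes("family1"), null)  // family, null qualifier
        .setCaching(10)                             // rows fetched per RPC
        .setStartRow(Bytes.toBytes("r-f1-020"))
        .create();
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        // process r
      }
    } finally {
      scanner.close();  // release server-side scanner resources
    }
  }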

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSwiftSerDe.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSwiftSerDe.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSwiftSerDe.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSwiftSerDe.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,214 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift.swift;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TRowMutations;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.TFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.io.hfile.histogram.HFileHistogram.Bucket;
+import org.apache.hadoop.hbase.io.hfile.histogram.HFileHistogram.HFileStat;
+import org.apache.hadoop.hbase.master.AssignmentPlan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.transport.TMemoryBuffer;
+import org.junit.Test;
+
+import com.facebook.swift.codec.ThriftCodec;
+import com.facebook.swift.codec.ThriftCodecManager;
+
+public class TestSwiftSerDe {
+
+  byte[] family1 = Bytes.toBytes("family1");
+  byte[] family2 = Bytes.toBytes("family2");
+  byte[] qual = Bytes.toBytes("qual");
+  byte[] row = Bytes.toBytes("rowkey");
+
+  @Test
+  public void testTRowMutationsSwiftSerDe() throws Exception {
+    ThriftCodec<TRowMutations> codec = new ThriftCodecManager()
+        .getCodec(TRowMutations.class);
+    TMemoryBuffer transport = new TMemoryBuffer(1000 * 1024);
+    TCompactProtocol protocol = new TCompactProtocol(transport);
+
+    Put p1 = new Put(row);
+    p1.add(family1, qual, family1);
+    Put p2 = new Put(row);
+    p2.add(family2, qual, family2);
+    Delete d = new Delete(row);
+    d.deleteColumn(family1, qual);
+    TRowMutations mutations = (new TRowMutations.Builder(row))
+        .addPut(p1).addPut(p2).addDelete(d).create();
+
+    codec.write(mutations, protocol);
+    TRowMutations copy = codec.read(protocol);
+    assertTrue(Bytes.BYTES_COMPARATOR.compare(mutations.getRow(), copy.getRow()) == 0);
+    assertEquals(mutations.getPuts().size(), copy.getPuts().size());
+
+    // 3 (instead of 2) because a no-op put is inserted at each position
+    // corresponding to a delete, keeping the two lists aligned by index.
+    assertEquals(3, copy.getPuts().size());
+    for (int i = 0; i < 2; i++) {
+      assertTrue(mutations.getPuts().get(i).equals(copy.getPuts().get(i)));
+    }
+    assertEquals(mutations.getDeletes().size(), copy.getDeletes().size());
+    assertEquals(3, copy.getDeletes().size());
+    assertTrue(mutations.getDeletes().get(0).equals(copy.getDeletes().get(0)));
+  }
+
+  /**
+   * Tests Scan serialization and deserialization with the Scan.Builder.
+   * This one uses addColumn to construct the Scan object.
+   * @throws Exception
+   */
+  @Test
+  public void testScanSerDe() throws Exception {
+
+    Scan.Builder b = new Scan.Builder();
+    Filter filter = new WhileMatchFilter(
+        new PrefixFilter(Bytes.toBytes("poiuy")));
+    Scan s = b.addColumn(family1, qual)
+      .setBatch(3425)
+      .setCacheBlocks(true)
+      .setCaching(8647)
+      .setEffectiveTS(87434)
+      .setMaxResultsPerColumnFamily(7624)
+      .setMaxVersions(34)
+      .setRowOffsetPerColumnFamily(186434)
+      .setServerPrefetching(true)
+      .setStartRow(Bytes.toBytes("hfy"))
+      .setStopRow(Bytes.toBytes("ijk"))
+      .setTimeRange(3455, 8765)
+      .setFilter(filter)
+      .create();
+    byte[] data = Bytes.writeThriftBytes(s, Scan.class);
+    Scan copy = Bytes.readThriftBytes(data, Scan.class);
+    assertEquals(s, copy);
+  }
+
+  /**
+   * Constructs the Scan object using addFamily, without the builder.
+   * @throws Exception
+   */
+  @Test
+  public void testScanSerDe2() throws Exception {
+    Scan s = new Scan();
+    s.addFamily(family1);
+    s.setBatch(3425);
+    s.setCacheBlocks(true);
+    s.setCaching(8647);
+    s.setEffectiveTS(87434);
+    s.setMaxResultsPerColumnFamily(7624);
+    s.setMaxVersions(34);
+    s.setRowOffsetPerColumnFamily(186434);
+    s.setServerPrefetching(true);
+    s.setStartRow(Bytes.toBytes("hfy"));
+    s.setStopRow(Bytes.toBytes("ijk"));
+    s.setTimeRange(3455, 8765);
+    byte[] data = Bytes.writeThriftBytes(s, Scan.class);
+    Scan copy = Bytes.readThriftBytes(data, Scan.class);
+    assertEquals(s, copy);
+  }
+
+  @Test
+  public void testFilterSerDe() throws Exception {
+    FilterList f = new FilterList(Operator.MUST_PASS_ALL);
+    byte[] randomPrefix = Bytes.toBytes("randomPrefix1");
+    PrefixFilter f1 = new PrefixFilter(randomPrefix);
+    f.addFilter(f1);
+    PageFilter f2 = new PageFilter(1024);
+    f.addFilter(f2);
+    KeyOnlyFilter f3 = new KeyOnlyFilter(false);
+    f.addFilter(f3);
+    TFilter tf = TFilter.getTFilter(f);
+    byte[] data = Bytes.writeThriftBytes(tf, TFilter.class);
+    TFilter copy = Bytes.readThriftBytes(data, TFilter.class);
+    FilterList flCopy = (FilterList)copy.getFilter();
+    assertEquals(3, flCopy.getFilters().size());
+    assertTrue(flCopy.getFilters().get(0) instanceof PrefixFilter);
+    byte[] expectedRandomPrefix = ((PrefixFilter)flCopy.getFilters().get(0)).getPrefix();
+    assertTrue(Bytes.equals(expectedRandomPrefix, randomPrefix));
+    assertTrue(flCopy.getFilters().get(1) instanceof PageFilter);
+    assertEquals(1024, ((PageFilter)flCopy.getFilters().get(1)).getPageSize());
+    assertTrue(flCopy.getFilters().get(2) instanceof KeyOnlyFilter);
+  }
+
+  /**
+   * Tests AssignmentPlan serialization and de-serialization via thrift.
+   * @throws Exception
+   */
+  @Test
+  public void testAssignmentPlanSerDe() throws Exception {
+    AssignmentPlan originalPlan = new AssignmentPlan();
+    List<HServerAddress> lst = new ArrayList<HServerAddress>();
+    HTableDescriptor desc = new HTableDescriptor(
+        Bytes.toBytes("testAssignmentPlan"),
+        new ArrayList<HColumnDescriptor>(),
+        new HashMap<byte[], byte[]>());
+    HRegionInfo info = new HRegionInfo(desc,
+        Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), false, 0);
+    lst.add(new HServerAddress(
+        java.net.InetAddress.getLocalHost().getHostName(), 0));
+    originalPlan.updateAssignmentPlan(info, lst);
+    byte[] data = Bytes.writeThriftBytes(originalPlan, AssignmentPlan.class);
+    AssignmentPlan planCopy = Bytes.readThriftBytes(data, AssignmentPlan.class);
+
+    assertEquals(originalPlan, planCopy);
+  }
+
+  @Test
+  public void testHFileHistogramSerDe() throws Exception {
+    byte [] startRow = Bytes.toBytes("testStartRow");
+    byte [] endRow = Bytes.toBytes("testEndRow");
+    int cnt = 827946;
+    Bucket b = new Bucket.Builder()
+      .setStartRow(startRow)
+      .setEndRow(endRow)
+      .setNumRows(cnt)
+      .addHFileStat(HFileStat.KEYVALUECOUNT, 8726.0)
+      .create();
+    Bucket bCopy = Bytes.readThriftBytes(
+        Bytes.writeThriftBytes(b, Bucket.class), Bucket.class);
+    assertEquals(b, bCopy);
+  }
+}
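
The first test above drives the swift codec by hand, while the later ones go through Bytes.writeThriftBytes/readThriftBytes. A hedged sketch of the hand-driven round trip factored into a generic helper (the helper itself is not part of the commit):

  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.thrift.transport.TMemoryBuffer;

  import com.facebook.swift.codec.ThriftCodec;
  import com.facebook.swift.codec.ThriftCodecManager;

  // Serialize a swift-annotated object into an in-memory transport,
  // then read it back from the same buffer.
  static <T> T roundTrip(T obj, Class<T> clazz) throws Exception {
    ThriftCodec<T> codec = new ThriftCodecManager().getCodec(clazz);
    TMemoryBuffer transport = new TMemoryBuffer(1000 * 1024);
    TCompactProtocol protocol = new TCompactProtocol(transport);
    codec.write(obj, protocol);   // encode using the compact protocol
    return codec.read(protocol);  // decode from the same buffer
  }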

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftExceptions.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftExceptions.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftExceptions.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftExceptions.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,103 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift.swift;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ipc.HBaseRPCOptions;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.ipc.ThriftHRegionInterface;
+import org.apache.hadoop.hbase.ipc.thrift.HBaseThriftRPC;
+import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+/**
+ * A test class verifying that the Thrift exceptions work as expected.
+ */
+public class TestThriftExceptions extends TestCase {
+  /**
+   * Test if the ThriftHBaseException can be serialized and deserialized
+   * correctly.
+   */
+  @Test
+  public void testSerialization() throws Exception {
+    IOException ioe = new IOException("FooBar");
+    ThriftHBaseException thriftHBaseException = new ThriftHBaseException(ioe);
+    byte[] thriftHBaseExceptionBytes =
+      Bytes.writeThriftBytes(thriftHBaseException, ThriftHBaseException.class);
+
+    ThriftHBaseException deserThriftHBaseException =
+      Bytes.readThriftBytes(thriftHBaseExceptionBytes,
+                            ThriftHBaseException.class);
+
+    assertEquals(thriftHBaseException, deserThriftHBaseException);
+  }
+
+
+  /**
+   * Test if the exceptions raised on the server end, are wrapped correctly
+   * in the ThriftHBaseException and are decoded correctly using the
+   * HBaseThriftAdapter.
+   */
+  @Test
+  public void testExceptionTranslation()
+    throws InterruptedException, IOException {
+    HBaseTestingUtility testUtil = new HBaseTestingUtility();
+    testUtil.getConfiguration().setBoolean(
+      HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    testUtil.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+      true);
+    testUtil.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+      true);
+    testUtil.startMiniCluster(1);
+    Configuration conf = testUtil.getConfiguration();
+
+    int port = testUtil.getHBaseCluster().getRegionServer(0).getThriftServerPort();
+
+    InetSocketAddress addr = new InetSocketAddress(port);
+    HRegionInterface client = (HRegionInterface) HBaseThriftRPC.getClient(addr,
+      conf, ThriftHRegionInterface.class, HBaseRPCOptions.DEFAULT);
+
+    boolean illegalArgumentException = false;
+    try {
+      client.flushRegion(Bytes.toBytes("foobar"));
+    } catch (IllegalArgumentException e) {
+      illegalArgumentException = true;
+    }
+    assertTrue("Expected IllegalArgumentException", illegalArgumentException);
+
+    boolean notServingRegionException = false;
+    try {
+      client.getClosestRowBefore(Bytes.toBytes("foo"),
+      Bytes.toBytes("bar"), Bytes.toBytes("baz"));
+    } catch (NotServingRegionException e) {
+      notServingRegionException = true;
+    }
+    assertTrue("Expected NotServingRegionException", notServingRegionException);
+    testUtil.shutdownMiniCluster();
+  }
+}
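
As a side note, the flag-and-catch pattern above can also be written with JUnit 4's expected-exception support; a minimal sketch, assuming the same thrift client setup as in testExceptionTranslation (the method name is hypothetical). Since the class above extends TestCase it runs under JUnit 3, where @Test(expected = ...) has no effect, so the flag-and-catch form is the working choice there:

  // Assumes a 'client' initialized as in testExceptionTranslation above,
  // and a JUnit 4 runner so the 'expected' attribute is honored.
  @Test(expected = IllegalArgumentException.class)
  public void testFlushBogusRegion() throws Exception {
    client.flushRegion(Bytes.toBytes("foobar"));  // expected to throw
  }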

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftMultiRSScenario.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftMultiRSScenario.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftMultiRSScenario.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftMultiRSScenario.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,79 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift.swift;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestThriftMultiRSScenario {
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final int SLAVES = 3;
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META, true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        true);
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Tests a simple put/get round trip on a mini cluster with 3 slaves.
+   * @throws IOException
+   */
+  @Test
+  public void testMultiRSScenario() throws IOException {
+    byte[] tableName = Bytes.toBytes("testMultiRSScenario");
+    byte[] family = Bytes.toBytes("family");
+    byte[] row = Bytes.toBytes("row");
+    HTable table = TEST_UTIL.createTable(tableName, family);
+    Put p = new Put(row);
+    p.add(family, null, row);
+    table.put(p);
+
+    Get g = new Get(row);
+    Result r = table.get(g);
+
+    assertTrue(Bytes.equals(r.getValue(family, null), row));
+  }
+
+}

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java Wed Mar 12 21:17:13 2014
@@ -39,17 +39,17 @@ public class HBaseHomePath {
       throw new RuntimeException("Could not lookup class location for " + className);
     }
 
-    String path = url.getPath();
+    String path = url.getPath(); 
     if (!path.endsWith(relPathForClass)) {
-      throw new RuntimeException("Got invalid path trying to look up class " + className +
-          ": " + path);
+      throw new RuntimeException("Got invalid path trying to look up class " + className + 
+          ": " + path); 
     }
     path = path.substring(0, path.length() - relPathForClass.length());
 
     if (path.startsWith(FILE_PREFIX)) {
       path = path.substring(FILE_PREFIX.length());
     }
-
+    
     if (path.endsWith(TARGET_CLASSES)) {
       path = path.substring(0, path.length() - TARGET_CLASSES.length());
     } else if (path.endsWith(JAR_SUFFIX)) {

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java Wed Mar 12 21:17:13 2014
@@ -544,3 +544,4 @@ public class ProcessBasedLocalHBaseClust
   }
 
 }
+

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java Wed Mar 12 21:17:13 2014
@@ -92,7 +92,7 @@ public class RestartMetaTest extends Abs
     ProcessBasedLocalHBaseCluster hbaseCluster =
         new ProcessBasedLocalHBaseCluster(conf, NUM_DATANODES, numRegionServers);
     hbaseCluster.startMiniDFS();
-
+    
     // start the process based HBase cluster
     hbaseCluster.startHBase();
 

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TagRunner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TagRunner.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TagRunner.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TagRunner.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,118 @@
+/**
+ * Copyright 2014 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.junit.runner.Description;
+import org.junit.runner.Runner;
+import org.junit.runner.notification.RunNotifier;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+
+/**
+ * A TagRunner is used to control the running of JUnit test cases by tags.
+ * To use a <code>TagRunner</code>:
+ * <ol>
+ * <li>Annotate the test class with <code>@RunWith(TagRunner.class)</code></li>
+ * <li>Annotate a test method with <code>@TestTag({"tag"})</code></li>
+ * <li>Now you can specify the system property <code>testskip</code> to skip
+ * all the test cases carrying certain tags.</li>
+ * </ol>
+ *
+ * <code>testskip</code> is a semicolon-separated string containing all the
+ * tags to be skipped. Under Maven the property is specified like this:
+ * <code>mvn test -Dtestskip=tag1;tag2</code>
+ *
+ * <code>all</code> is a special tag which causes every tagged test to be skipped.
+ */
+public class TagRunner extends Runner {
+  private static final Set<String> skipTags = new HashSet<String>();
+  private static boolean skipAll = false;
+  /**
+   * The system property naming the tags to skip. The property is a
+   * semicolon-separated string.
+   */
+  private static String PROP_TESTSKIP = "testskip";
+  /**
+   * A special tag matching all tags.
+   */
+  private static String TAG_ALL = "all";
+  static {
+    String tagStr = System.getProperty(PROP_TESTSKIP, "");
+    if (tagStr.length() > 0) {
+      for (String tag : tagStr.split(";")) {
+        skipTags.add(tag);
+      }
+    }
+    skipAll = skipTags.contains(TAG_ALL);
+  }
+
+  static private boolean shouldSkip(String[] tags) {
+    if (skipAll) {
+      return true;
+    }
+
+    for (String tag: tags) {
+      if (skipTags.contains(tag)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static class SelectiveRunner extends BlockJUnit4ClassRunner {
+    @Override
+    protected void runChild(FrameworkMethod mth, RunNotifier rn) {
+      TestTag tag = mth.getAnnotation(TestTag.class);
+      if (tag != null && tag.value().length > 0) {
+        if (shouldSkip(tag.value())) {
+          rn.fireTestIgnored(this.describeChild(mth));
+          return;
+        }
+      }
+
+      super.runChild(mth, rn);
+    }
+
+    public SelectiveRunner(Class<?> klass) throws InitializationError {
+      super(klass);
+    }
+  }
+
+  private Runner runner;
+
+  public TagRunner(Class<?> testClass) throws InitializationError {
+    this.runner = new SelectiveRunner(testClass);
+  }
+
+  @Override
+  public Description getDescription() {
+    return runner.getDescription();
+  }
+
+  @Override
+  public void run(RunNotifier rn) {
+    runner.run(rn);
+  }
+
+}
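
A minimal usage sketch for the runner described in the class comment above (the test class and the "slow" tag are hypothetical):

  import org.junit.Test;
  import org.junit.runner.RunWith;

  import org.apache.hadoop.hbase.util.TagRunner;
  import org.apache.hadoop.hbase.util.TestTag;

  @RunWith(TagRunner.class)
  public class MySlowTests {
    // Skipped when run with -Dtestskip=slow (or -Dtestskip=all).
    @TestTag({ "slow" })
    @Test
    public void testExpensiveScan() { }

    // Untagged tests always run.
    @Test
    public void testCheapGet() { }
  }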

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java Wed Mar 12 21:17:13 2014
@@ -138,7 +138,7 @@ public class TestByteBloomFilter extends
 
     // test: foldFactor > log(max/actual)
   }
-
+  
   public void testSizing() {
     int bitSize = 8 * 128 * 1024; // 128 KB
     double errorRate = 0.025; // target false positive rate
@@ -155,7 +155,7 @@ public class TestByteBloomFilter extends
     // The bit size comes out a little different due to rounding.
     assertTrue(Math.abs(bitSize2 - bitSize) * 1.0 / bitSize < 1e-5);
   }
-
+  
   public void testFoldableByteSize() {
     assertEquals(128, ByteBloomFilter.computeFoldableByteSize(1000, 5));
     assertEquals(640, ByteBloomFilter.computeFoldableByteSize(5001, 4));

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java Wed Mar 12 21:17:13 2014
@@ -170,7 +170,7 @@ public class TestBytes extends TestCase 
           Bytes.BYTES_RAWCOMPARATOR));
     }
   }
-
+  
   public void testStartsWith() {
     assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("h")));
     assertTrue(Bytes.startsWith(Bytes.toBytes("hello"), Bytes.toBytes("")));
@@ -214,7 +214,7 @@ public class TestBytes extends TestCase 
 
     return (Bytes.toLong(testValue) + amount) == incrementResult;
   }
-
+  
   public void testFixedSizeString() throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     DataOutputStream dos = new DataOutputStream(baos);

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java Wed Mar 12 21:17:13 2014
@@ -76,7 +76,7 @@ public class TestMiniClusterLoadSequenti
     this.dataBlockEncoding = dataBlockEncoding;
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
 
-    // We don't want any region reassignments by the load balancer during the test.
+    // We don't want any region reassignments by the load balancer during the test. 
     conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, 10.0f);
   }
 

Copied: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestTag.java (from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestTag.java?p2=hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestTag.java&p1=hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java&r1=1576907&r2=1576909&rev=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestTag.java Wed Mar 12 21:17:13 2014
@@ -1,5 +1,5 @@
 /**
- * Copyright 2011 The Apache Software Foundation
+ * Copyright 2014 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -17,20 +17,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.hbase.util;
 
-package org.apache.hadoop.hbase.io.hfile;
-
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
-
-public interface Cacheable extends HeapSize {
-  /**
-   * @return the block type of this cached HFile block
-   */
-  public BlockType getBlockType();
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
 
+/**
+ * The annotation class specifying tags for a testcase. An annotated test case
+ * is skipped when one of its tags is listed in the <code>testskip</code>
+ * system property.
+ *
+ * A tag is a string containing neither semicolons nor spaces.
+ *
+ * See comments of TagRunner for more information.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ ElementType.METHOD })
+public @interface TestTag {
   /**
-   * @return the metrics object identified by table and column family
+   * The list of tags.
    */
-  public SchemaMetrics getSchemaMetrics();
+  String[] value();
 }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java Wed Mar 12 21:17:13 2014
@@ -72,3 +72,4 @@ public class TestThreads {
         timeElapsed);
   }
 }
+

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TitanUserInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TitanUserInfo.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TitanUserInfo.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TitanUserInfo.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,63 @@
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.loadtest.RegionSplitter;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ * Given a Titan userid, figure out where the user is located on a cluster. This
+ * is useful for taking userids, which appserver people use, and translating
+ * them to regions & current RS for diagnosis.
+ */
+public class TitanUserInfo {
+  public static void main(String[] args) throws IOException {
+    if (args.length < 1) {
+      System.err.println("Usage: TitanUserInfo <id...@facebook.com> [TABLE_NAME|stop]");
+    }
+    
+    // turn logging level to error so we don't have verbose output
+    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN);
+    Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.WARN);
+    
+    String userid = args[0].trim();
+    byte[] row = RegionSplitter.getHBaseKeyFromEmail(userid);
+    
+    System.out.println("userid = " + userid);
+    System.out.println("HBase row = " + Bytes.toStringBinary(row));
+
+    Configuration conf = HBaseConfiguration.create();
+    List<String> tableNames = new ArrayList<String>();
+    String filter = (args.length >= 2) ? args[1].trim() : "MailBox";
+
+    if (filter.toLowerCase().equals("stop")) {
+      return;
+    }
+
+    HBaseAdmin hba = new HBaseAdmin(conf);
+    for (HTableDescriptor htd : hba.listTables()) {
+      String curName = Bytes.toString(htd.getName());
+      if (curName.startsWith(filter)) {
+        tableNames.add(curName);
+      }
+    }
+    
+    for (String curName : tableNames) {
+      // get region & regionserver given row + table
+      HTable table = new HTable(curName);
+      HRegionLocation loc = table.getRegionLocation(row);
+      System.out.println(curName + " region = " + loc.getRegionInfo().getRegionNameAsString());
+      System.out.println(curName + " cur server = " + loc.getServerAddress());
+    }
+  }
+}

Copied: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkClient.java (from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkClient.java?p2=hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkClient.java&p1=hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java&r1=1576907&r2=1576909&rev=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkClient.java Wed Mar 12 21:17:13 2014
@@ -1,6 +1,4 @@
 /**
- * Copyright 2010 The Apache Software Foundation
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,18 +15,31 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase;
 
-import java.io.IOException;
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
 
 /**
- * This exception is thrown by the master when a region server reports and is
- * already being processed as dead. This can happen when a region server loses
- * its session but didn't figure it yet.
+ * Benchmark client interface representing a generic client that performs the
+ * simple gets, puts and scans needed by the benchmark.
  */
-public class YouAreDeadException extends IOException {
+public interface BenchmarkClient {
+
+  public Get createGet(byte[] row, byte[] family, byte[] qual);
+
+  public Put createPut(byte[] row, byte[] family, byte[] qual, byte[] value);
+
+  public Result executeGet(Get get);
+
+  public void executePut(Put put);
+
+  public List<Result> executeScan(Scan scan);
 
-  public YouAreDeadException(String message) {
-    super(message);
-  }
+  public Scan createScan(byte[] row, byte[] family, int nbRows);
 }

Copied: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkFactory.java (from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkFactory.java?p2=hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkFactory.java&p1=hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java&r1=1576907&r2=1576909&rev=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkFactory.java Wed Mar 12 21:17:13 2014
@@ -1,6 +1,4 @@
 /**
- * Copyright 2010 The Apache Software Foundation
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,18 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase;
 
-import java.io.IOException;
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import org.apache.hadoop.conf.Configuration;
 
 /**
- * This exception is thrown by the master when a region server reports and is
- * already being processed as dead. This can happen when a region server loses
- * its session but didn't figure it yet.
+ * This is a factory interface which provides a method to create a
+ * BenchmarkClient object.
  */
-public class YouAreDeadException extends IOException {
-
-  public YouAreDeadException(String message) {
-    super(message);
-  }
+public interface BenchmarkFactory {
+  public BenchmarkClient makeBenchmarkClient(byte[] table, Configuration conf);
 }

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCBenchmarkTool.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCBenchmarkTool.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCBenchmarkTool.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCBenchmarkTool.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,472 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.loadtest.ColumnFamilyProperties;
+import org.apache.hadoop.hbase.loadtest.HBaseUtils;
+import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Histogram;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Tool that runs the benchmarks. It takes the name of a benchmark factory,
+ * performs a single put, and then issues many gets to retrieve that put.
+ *
+ * We can provide arguments such as the number of client threads to run, the
+ * number of rounds to repeat the experiment for, the length of the payload
+ * and the number of operations to perform.
+ */
+public class HBaseRPCBenchmarkTool extends AbstractHBaseTool {
+
+  private static final Log LOG = LogFactory.getLog(HBaseRPCBenchmarkTool.class);
+
+  private static final long DEFAULT_REPORT_INTERVAL_MS = 10;
+  private static final int DEFAULT_NUM_OPS = 200;
+  private static final int DEFAULT_NUM_THREADS = 10;
+  private static final String DEFAULT_ROW = "rowkey";
+  private static final String DEFAULT_CF = "cf";
+  private static final String DEFAULT_QUAL = "q";
+  private static final String DEFAULT_VALUE = "v";
+  private static final String DEFAULT_TABLENAME = "RPCBenchmarkingTable";
+  private static final int DEFAULT_ZK_PORT =
+      HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;
+  private static final boolean DEFAULT_DO_PUT = true;
+
+  /**
+   * The following are the command line parameters which this tool takes.
+   */
+  private static final String OPT_CF = "cf";
+  private static final String OPT_QUAL = "q";
+  private static final String OPT_ROW = "r";
+  private static final String OPT_TBL_NAME = "t";
+  private static final String OPT_VALUE = "v";
+  private static final String OPT_CLASS = "c";
+  private static final String OPT_NUM_OPS = "ops";
+  private static final String OPT_NUM_THREADS = "threads";
+  private static final String OPT_REPORT_INTERVAL = "interval";
+  private static final String OPT_NO_PUT = "no_put";
+  private static final String OPT_ZK_QUORUM = "zk";
+  private static final String OPT_ZK_PORT = "zkPort";
+
+  /**
+   * These are values that we get from the command line and
+   * a few other internal state variables.
+   */
+  private Configuration conf;
+  // Initializing a histogram covering 0 to 1 second, in nanoseconds.
+  private final Histogram histogram = new Histogram(100, 0,
+      1*1000*1000*1000);
+  private Class<?> factoryCls;
+  private byte[] tblName;
+  private String zkQuorum;
+  private int zkPort;
+  private byte[] row;
+  private byte[] family;
+  private byte[] qual;
+  private byte[] value;
+  private int numOps;
+  private int numThreads;
+  private long reportInterval;
+  private boolean doPut;
+  private AtomicLong sumLatency = new AtomicLong(0);
+  private AtomicLong totalOps = new AtomicLong(0);
+  private long runtimeMs;
+
+  private HBaseRPCBenchmarkTool() {
+  }
+
+  private HBaseRPCBenchmarkTool(Class<?> factoryCls) {
+    this.factoryCls = factoryCls;
+  }
+
+  private HBaseRPCBenchmarkTool(Class<? extends BenchmarkFactory> factoryCls,
+      byte[] tableName, Configuration conf, byte[] row, byte[] cf, byte[] qual,
+      byte[] value, int numOps, int numThreads, long reportIntervalMs,
+      boolean doPut) {
+    this.factoryCls = factoryCls;
+    this.conf = conf;
+    this.tblName = tableName;
+    this.row = row;
+    this.family = cf;
+    this.qual = qual;
+    this.value = value;
+    this.numOps = numOps;
+    this.numThreads = numThreads;
+    this.reportInterval = reportIntervalMs;
+    this.doPut = doPut;
+  }
+
+  /**
+   * Builder class for the HBaseRPCBenchmarkTool.
+   */
+  public static class Builder {
+    private final Class<? extends BenchmarkFactory> factoryCls;
+    private byte[] tableName = Bytes.toBytes(DEFAULT_TABLENAME);
+    private byte[] row = Bytes.toBytes(DEFAULT_ROW);
+    private byte[] cf = Bytes.toBytes(DEFAULT_CF);
+    private byte[] qual = Bytes.toBytes(DEFAULT_QUAL);
+    private byte[] value = Bytes.toBytes(DEFAULT_VALUE);
+    private int numOps = DEFAULT_NUM_OPS;
+    private int numThreads = DEFAULT_NUM_THREADS;
+    private long reportIntervalMs = DEFAULT_REPORT_INTERVAL_MS;
+    private boolean doPut = DEFAULT_DO_PUT;
+    private Configuration conf;
+
+    public Builder(Class<? extends BenchmarkFactory> factoryCls) {
+      this.factoryCls = factoryCls;
+    }
+
+    public Builder withTableName(byte[] tableName) {
+      this.tableName = tableName;
+      return this;
+    }
+
+    public Builder withRow(byte[] row) {
+      this.row = row;
+      return this;
+    }
+
+    public Builder withColumnFamily(byte[] cf) {
+      this.cf = cf;
+      return this;
+    }
+
+    public Builder withQualifier(byte[] qual) {
+      this.qual = qual;
+      return this;
+    }
+
+    public Builder withValue(byte[] value) {
+      this.value = value;
+      return this;
+    }
+
+    public Builder withNumOps(int numOps) {
+      this.numOps = numOps;
+      return this;
+    }
+
+    public Builder withNumThreads(int numThreads) {
+      this.numThreads = numThreads;
+      return this;
+    }
+
+    public Builder withDoPut(boolean doPut) {
+      this.doPut = doPut;
+      return this;
+    }
+
+    public Builder withConf(Configuration conf) {
+      this.conf = conf;
+      return this;
+    }
+
+    public HBaseRPCBenchmarkTool create() {
+      return new HBaseRPCBenchmarkTool(this.factoryCls, this.tableName,
+        this.conf, this.row, this.cf, this.qual, this.value,
+        this.numOps, this.numThreads, this.reportIntervalMs, this.doPut);
+    }
+  }
+
+  /**
+   * Registers the command line options for this tool.
+   */
+  @Override
+  protected void addOptions() {
+    addOptWithArg(OPT_CLASS, "Benchmark factory class");
+    addOptWithArg(OPT_NUM_THREADS, "Number of threads");
+    addOptWithArg(OPT_NUM_OPS, "Number of operations to execute per thread");
+    addOptWithArg(OPT_TBL_NAME, "Table name to use");
+    addOptWithArg(OPT_ZK_QUORUM, "Zookeeper quorum");
+    addOptWithArg(OPT_ZK_PORT, "Zookeeper Port");
+    addOptWithArg(OPT_REPORT_INTERVAL, "Reporting interval in milliseconds");
+    addOptWithArg(OPT_ROW, "Row key");
+    addOptWithArg(OPT_NO_PUT,
+        "DO NOT perform a single put (writing the value) before the benchmark");
+    addOptWithArg(OPT_CF, "Column family to use");
+    addOptWithArg(OPT_QUAL, "Column qualifier to use");
+    addOptWithArg(OPT_VALUE, "Value to use");
+  }
+
+  /**
+   * Parses the command line options.
+   */
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    conf = HBaseConfiguration.create();
+    // Takes ThriftBenchmarkFactory by default.
+    String className = ThriftBenchmarkFactory.class.getName();
+    if (cmd.hasOption(OPT_CLASS)) {
+      className = cmd.getOptionValue(OPT_CLASS);
+      LOG.debug("Using class name : " + className);
+    }
+    try {
+      factoryCls = Class.forName(className);
+    } catch (ClassNotFoundException e) {
+      throw new IllegalArgumentException("Can't find a class " + className, e);
+    }
+    this.zkPort = DEFAULT_ZK_PORT;
+    if (cmd.hasOption(OPT_ZK_QUORUM)) {
+      zkQuorum = cmd.getOptionValue(OPT_ZK_QUORUM);
+      conf.set(HConstants.ZOOKEEPER_QUORUM, zkQuorum);
+      conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, this.zkPort);
+      LOG.debug("Adding zookeeper quorum : " + zkQuorum);
+    }
+    reportInterval = parseLong(cmd.getOptionValue(OPT_REPORT_INTERVAL,
+        String.valueOf(DEFAULT_REPORT_INTERVAL_MS)), reportInterval, Long.MAX_VALUE);
+    if (cmd.hasOption(OPT_TBL_NAME)) {
+      tblName = Bytes.toBytes(cmd.getOptionValue(OPT_TBL_NAME));
+    } else {
+      tblName = Bytes.toBytes(DEFAULT_TABLENAME);
+      ColumnFamilyProperties[] familyProperties = new ColumnFamilyProperties[1];
+      familyProperties[0] = new ColumnFamilyProperties();
+      familyProperties[0].familyName = DEFAULT_CF;
+      familyProperties[0].maxVersions = Integer.MAX_VALUE;
+      HBaseUtils.createTableIfNotExists(conf,
+          tblName, familyProperties, 1);
+    }
+    row = Bytes.toBytes(cmd.getOptionValue(OPT_ROW, DEFAULT_ROW));
+    family = Bytes.toBytes(cmd.getOptionValue(OPT_CF, DEFAULT_CF));
+    qual = Bytes.toBytes(cmd.getOptionValue(OPT_QUAL, DEFAULT_QUAL));
+    value = Bytes.toBytes(cmd.getOptionValue(OPT_VALUE, DEFAULT_VALUE));
+    numOps = parseInt(cmd.getOptionValue(OPT_NUM_OPS,
+        String.valueOf(DEFAULT_NUM_OPS)), 1, Integer.MAX_VALUE);
+    numThreads = parseInt(cmd.getOptionValue(OPT_NUM_THREADS,
+        String.valueOf(DEFAULT_NUM_THREADS)), 1, Integer.MAX_VALUE);
+    doPut = !cmd.hasOption(OPT_NO_PUT);
+  }
+
+  /**
+   * Main function which does the benchmarks.
+   * @throws IllegalAccessException
+   * @throws InstantiationException
+   */
+  @Override
+  protected void doWork() throws InterruptedException, InstantiationException, IllegalAccessException {
+    // Initializing the required objects.
+    BenchmarkFactory factory = (BenchmarkFactory) factoryCls.newInstance();
+    LOG.debug("Creating an instance of the factory class : " +
+        factoryCls.getCanonicalName());
+    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
+    BenchmarkClient benchmark = factory.makeBenchmarkClient(tblName, conf);
+
+    // Performing a single put to the region server.
+    if (doPut) {
+      benchmark.executePut(benchmark.createPut(row, family, qual, value));
+      Result r = benchmark.executeGet(benchmark.createGet(row, family, qual));
+      if (!Bytes.equals(r.getValue(family, qual), value)) {
+        LOG.warn("Sanity check failed: value read back does not match the put");
+      }
+      doPut = false;
+    }
+    runtimeMs = System.currentTimeMillis();
+
+    // Count-down latches which let us synchronize all the benchmark workers
+    // to run together.
+    final AtomicBoolean running = new AtomicBoolean(true);
+    final CountDownLatch readySignal = new CountDownLatch(numThreads);
+    final CountDownLatch startSignal = new CountDownLatch(1);
+    final CountDownLatch doneSignal = new CountDownLatch(numThreads);
+
+    // Spawning the worker threads here.
+    for (int i = 0; i < numThreads; i++) {
+      executor.submit(new WorkerThread(histogram, sumLatency,
+          totalOps, factory, tblName, conf, numOps, reportInterval, row,
+          family, qual, readySignal, startSignal, doneSignal, running));
+    }
+
+    // Wait for all the worker threads to come up, then let them know that
+    // they are free to start their benchmarks.
+    try {
+      // Will wait for all the threads to get ready i.e. start
+      readySignal.await();
+
+      // will signal all the threads to start simultaneously
+      startSignal.countDown();
+
+      // Will wait for all the threads to finish execution up to a certain point
+      doneSignal.await();
+
+      // Will signal the threads to terminate.
+      running.set(false);
+    } catch (InterruptedException e) {
+      LOG.error("Not able to start the worker threads together." +
+          "Probably we were interrupted?");
+    }
+    executor.shutdown();
+    executor.awaitTermination(1, TimeUnit.HOURS);
+    runtimeMs = System.currentTimeMillis() - runtimeMs;
+  }
+
+  public static void printStats(String msg, int numOps, long startTime) {
+    long elapsedMs = System.currentTimeMillis() - startTime;
+    double opsPerMs = numOps / (double) elapsedMs;
+    StringBuilder sb = new StringBuilder();
+    sb.append(msg);
+    sb.append(" throughput : ");
+    sb.append(opsPerMs);
+    sb.append(" ops/ms.");
+    System.out.println(sb.toString());
+  }
+
+  /**
+   * The worker which performs one thread's share of the benchmark.
+   * Once started, it waits until all the workers are ready and then runs
+   * its operations.
+   */
+  class WorkerThread implements Runnable {
+
+    private final Histogram histogram;
+    private final AtomicLong totalLatency;
+    private final AtomicLong totalOps;
+    private final BenchmarkFactory factory;
+    private BenchmarkClient benchmark;
+    private final byte[] tableName;
+    private final Configuration conf;
+    private final int numOps;
+    private final byte[] row;
+    private final byte[] family;
+    private final byte[] qual;
+    private final CountDownLatch readySignal; // To notify the controller that this thread has started
+    private final CountDownLatch startSignal; // To notify this thread that it is free to start
+    private final CountDownLatch doneSignal; // To notify the controller thread to shut down the workers.
+    private final AtomicBoolean running; // the state variable which tells whether the threads should be running.
+    private boolean signalledDone = false;
+
+    WorkerThread(Histogram histogram,
+        AtomicLong totalLatency,
+        AtomicLong totalOps,
+        BenchmarkFactory benchmarkFactory,
+        byte[] tableName,
+        Configuration conf,
+        int numOps,
+        long reportIntervalMs,
+        byte[] row,
+        byte[] family,
+        byte[] qual,
+        CountDownLatch readySignal,
+        CountDownLatch startSignal,
+        CountDownLatch doneSignal,
+        AtomicBoolean running) {
+      this.factory = benchmarkFactory;
+      this.tableName = tableName;
+      this.conf = conf;
+      this.histogram = histogram;
+      this.totalLatency = totalLatency;
+      this.totalOps = totalOps;
+      this.numOps = numOps;
+      this.row = row;
+      this.family = family;
+      this.qual = qual;
+      this.readySignal = readySignal;
+      this.startSignal = startSignal;
+      this.doneSignal = doneSignal;
+      this.running = running;
+    }
+
+    @Override
+    public void run() {
+      LOG.debug("Worker Thread. numOps:" + numOps);
+      this.benchmark = factory.makeBenchmarkClient(tableName, conf);
+      final long startTime = System.currentTimeMillis();
+      // We let the master know that we are ready.
+      readySignal.countDown();
+      try {
+        // And wait for the master to signal us to start.
+        startSignal.await();
+      } catch (InterruptedException e) {
+        LOG.debug("Interrupted while waiting for the signal");
+        e.printStackTrace();
+      }
+      for (int i = 0; ; ++i) {
+        long opStartNs = System.nanoTime();
+        try {
+          benchmark.executeGet(benchmark.createGet(row, family, qual));
+        } catch (Exception e) {
+          LOG.debug("Encountered exception while performing get");
+          e.printStackTrace();
+          break;
+        }
+        long delta = System.nanoTime() - opStartNs;
+        totalLatency.addAndGet(delta);
+        totalOps.incrementAndGet();
+        histogram.addValue(delta);
+        if (i >= numOps) {
+          if (!signalledDone) {
+            doneSignal.countDown();
+            signalledDone = true;
+          }
+          if (!running.get()) break;
+        }
+      }
+      StringBuilder sb = new StringBuilder();
+      sb.append("Printing statistics for ");
+      sb.append(factoryCls.getName());
+      sb.append(". Average latency : ");
+      sb.append(sumLatency.get() / totalOps.get());
+      sb.append(" ns. ");
+      sb.append("p95 latency : ");
+      sb.append(histogram.getPercentileEstimate(PercentileMetric.P95));
+      sb.append(". p99 latency : ");
+      sb.append(histogram.getPercentileEstimate(PercentileMetric.P99));
+      sb.append(". Throughput : ");
+      sb.append((totalOps.get() * 1000) /
+          (System.currentTimeMillis() - startTime));
+      sb.append(" ops/s.");
+      LOG.debug(sb);
+    }
+  }
+
+  public long getTotalOps() {
+    return totalOps.get();
+  }
+
+  public double getThroughput() {
+    return (this.totalOps.get() * 1000) / (double)this.runtimeMs;
+  }
+
+  public double getAverageLatency() {
+    return this.sumLatency.get() / (double)this.totalOps.get();
+  }
+
+  public double getP95Latency() {
+    return histogram.getPercentileEstimate(PercentileMetric.P95);
+  }
+
+  public double getP99Latency() {
+    return histogram.getPercentileEstimate(PercentileMetric.P99);
+  }
+
+  public static void main(String[] args) {
+    int ret = new HBaseRPCBenchmarkTool().doStaticMain(args);
+    System.exit(ret);
+  }
+}

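For reference, a minimal sketch of driving the tool above programmatically
through its Builder (the same path HBaseRPCProtocolComparison below takes);
it assumes the table and column family already exist on a reachable cluster,
and checked exceptions from doWork() are elided:

    Configuration conf = HBaseConfiguration.create();
    HBaseRPCBenchmarkTool tool = new HBaseRPCBenchmarkTool
        .Builder(ThriftBenchmarkFactory.class)
        .withConf(conf)
        .withTableName(Bytes.toBytes("RPCBenchmarkingTable"))
        .withColumnFamily(Bytes.toBytes("cf"))
        .withQualifier(Bytes.toBytes("q"))
        .withRow(Bytes.toBytes("rowkey"))
        .withValue(Bytes.toBytes("v"))
        .withNumOps(10000)
        .withNumThreads(10)
        .withDoPut(true)
        .create();
    tool.doWork();
    // getThroughput() reports ops/s; the latency getters report nanoseconds.
    System.out.println("throughput: " + tool.getThroughput() +
        " ops/s, avg latency: " + tool.getAverageLatency() +
        " ns, p95: " + tool.getP95Latency() + " ns");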
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCProtocolComparison.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCProtocolComparison.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCProtocolComparison.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCProtocolComparison.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.loadtest.ColumnFamilyProperties;
+import org.apache.hadoop.hbase.loadtest.HBaseUtils;
+import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Histogram;
+
+/**
+ * Benchmark tool which compares RPC protocols by running their benchmarks
+ * together. It forms a layer over HBaseRPCBenchmarkTool.
+ * Currently it runs the benchmarks in parallel; functionality to run them
+ * serially could be added later.
+ */
+public class HBaseRPCProtocolComparison extends AbstractHBaseTool {
+  private static final Log LOG =
+      LogFactory.getLog(HBaseRPCProtocolComparison.class);
+
+  private static final long DEFAULT_REPORT_INTERVAL_MS = 1;
+  private static final int DEFAULT_NUM_OPS = 10000;
+  private static final int DEFAULT_NUM_ROUNDS = 100;
+  private static final int DEFAULT_NUM_THREADS = 10;
+  private static final String DEFAULT_ROW = "rowkey";
+  private static final String DEFAULT_CF = "cf";
+  private static final String DEFAULT_QUAL = "q";
+  private static final String DEFAULT_VALUE = "v";
+  private static final String DEFAULT_TABLENAME = "RPCBenchmarkingTable";
+  private static final int DEFAULT_ZK_PORT = 2181;
+  private static final boolean DEFAULT_DO_PUT = true;
+
+  private static final String OPT_CF = "cf";
+  private static final String OPT_QUAL = "q";
+  private static final String OPT_ROW = "r";
+  private static final String OPT_TBL_NAME = "t";
+  private static final String OPT_VALUE_LENGTH = "vlen";
+  private static final String OPT_CLASSES = "c";
+  private static final String OPT_NUM_OPS = "ops";
+  private static final String OPT_NUM_ROUNDS = "rounds";
+  private static final String OPT_NUM_THREADS = "threads";
+  private static final String OPT_REPORT_INTERVAL = "interval";
+  private static final String OPT_NO_PUT = "no_put";
+  private static final String OPT_ZK_QUORUM = "zk";
+  private static final String OPT_ZK_PORT = "zkPort";
+
+  private Configuration conf;
+  private List<Class<? extends BenchmarkFactory>> factoryClasses;
+  private byte[] tblName;
+  private String zkQuorum;
+  private int zkPort;
+  private byte[] row;
+  private byte[] family;
+  private byte[] qual;
+  private byte[] value;
+  private int valueLength;
+  private int numOps;
+  private int numRounds;
+  private int numThreads;
+  private long reportInterval;
+  private boolean doPut;
+
+  @Override
+  protected void addOptions() {
+    addOptWithArg(OPT_CLASSES, "Benchmark factory classes");
+    addOptWithArg(OPT_NUM_THREADS, "Number of threads");
+    addOptWithArg(OPT_NUM_OPS, "Number of operations to execute per thread");
+    addOptWithArg(OPT_TBL_NAME, "Table name");
+    addOptWithArg(OPT_ZK_QUORUM, "Zookeeper quorum");
+    addOptWithArg(OPT_ZK_PORT, "Zookeeper Port");
+    addOptWithArg(OPT_REPORT_INTERVAL, "Reporting interval in milliseconds");
+    addOptWithArg(OPT_ROW, "Row key");
+    addOptWithArg(OPT_NO_PUT,
+        "DO NOT perform a single put (writing the value) before the benchmark");
+    addOptWithArg(OPT_CF, "Column family to use");
+    addOptWithArg(OPT_QUAL, "Column qualifier to use");
+    addOptWithArg(OPT_VALUE_LENGTH, "Value length to use");
+    addOptWithArg(OPT_NUM_ROUNDS, "Number of rounds to perform the tests");
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    conf = HBaseConfiguration.create();
+    if (!cmd.hasOption(OPT_CLASSES)) {
+      throw new IllegalArgumentException("--" + OPT_CLASSES +
+          " must be specified!");
+    }
+    String classNames = cmd.getOptionValue(OPT_CLASSES);
+    LOG.debug("Using class names : " + classNames);
+    try {
+      factoryClasses = new ArrayList<Class<? extends BenchmarkFactory>>();
+      for (String s : classNames.split(",")) {
+        factoryClasses.add(Class.forName(s).asSubclass(BenchmarkFactory.class));
+      }
+    } catch (ClassNotFoundException e) {
+      throw new IllegalArgumentException("Could not find class " +
+          classNames, e);
+    }
+    zkPort = DEFAULT_ZK_PORT;
+    if (cmd.hasOption(OPT_ZK_PORT)) {
+      zkPort = Integer.parseInt(cmd.getOptionValue(OPT_ZK_PORT));
+    }
+    if (cmd.hasOption(OPT_ZK_QUORUM)) {
+      zkQuorum = cmd.getOptionValue(OPT_ZK_QUORUM);
+      conf.set(HConstants.ZOOKEEPER_QUORUM, zkQuorum);
+      conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, this.zkPort);
+      LOG.debug("Adding zookeeper quorum : " + zkQuorum);
+    }
+    reportInterval = parseLong(cmd.getOptionValue(OPT_REPORT_INTERVAL,
+        String.valueOf(DEFAULT_REPORT_INTERVAL_MS)),
+        reportInterval, Long.MAX_VALUE);
+    if (cmd.hasOption(OPT_TBL_NAME)) {
+      tblName = Bytes.toBytes(cmd.getOptionValue(OPT_TBL_NAME));
+    } else {
+      tblName = Bytes.toBytes(DEFAULT_TABLENAME);
+      ColumnFamilyProperties[] familyProperties = new ColumnFamilyProperties[1];
+      familyProperties[0] = new ColumnFamilyProperties();
+      familyProperties[0].familyName = DEFAULT_CF;
+      familyProperties[0].maxVersions = Integer.MAX_VALUE;
+      HBaseUtils.createTableIfNotExists(conf,
+          tblName, familyProperties, 1);
+    }
+    row = Bytes.toBytes(cmd.getOptionValue(OPT_ROW, DEFAULT_ROW));
+    family = Bytes.toBytes(cmd.getOptionValue(OPT_CF, DEFAULT_CF));
+    qual = Bytes.toBytes(cmd.getOptionValue(OPT_QUAL, DEFAULT_QUAL));
+    // The option is numeric, so default to the length of DEFAULT_VALUE.
+    valueLength = parseInt(cmd.getOptionValue(OPT_VALUE_LENGTH,
+        String.valueOf(DEFAULT_VALUE.length())), 0, Integer.MAX_VALUE);
+    Random r = new Random();
+    value = new byte[valueLength];
+    r.nextBytes(value);
+    numOps = parseInt(cmd.getOptionValue(OPT_NUM_OPS,
+        String.valueOf(DEFAULT_NUM_OPS)), 1, Integer.MAX_VALUE);
+    numRounds = parseInt(cmd.getOptionValue(OPT_NUM_ROUNDS,
+        String.valueOf(DEFAULT_NUM_ROUNDS)), 1, Integer.MAX_VALUE);
+    numThreads = parseInt(cmd.getOptionValue(OPT_NUM_THREADS,
+        String.valueOf(DEFAULT_NUM_THREADS)), 1, Integer.MAX_VALUE);
+    doPut = !cmd.hasOption(OPT_NO_PUT);
+  }
+
+  /**
+   * A simple wrapper class which holds the relevant metrics for one factory.
+   */
+  private static class Stats {
+    public final Histogram histogram =
+        new Histogram(100, 0, 1 * 1000 * 1000 * 1000 /* 1s in ns */);
+    public final AtomicLong totalRuntime = new AtomicLong(0);
+    public final AtomicLong totalLatency = new AtomicLong(0);
+    public final AtomicLong totalOps = new AtomicLong(0);
+  }
+
+  /**
+   * The main function that performs the comparative benchmark.
+   * @throws InterruptedException
+   */
+  @Override
+  protected void doWork() throws InterruptedException {
+    final Map<Class<? extends BenchmarkFactory>, Stats> statsMap =
+        new HashMap<Class<? extends BenchmarkFactory>, Stats>();
+    for (final Class<? extends BenchmarkFactory> factoryCls : factoryClasses) {
+      statsMap.put(factoryCls, new Stats());
+    }
+
+    for (int i=0; i<numRounds; i++) {
+      ExecutorService executor = Executors.newFixedThreadPool(numThreads);
+      for (final Class<? extends BenchmarkFactory> factoryCls :
+          factoryClasses) {
+        executor.submit(new Runnable() {
+          @Override
+          public void run() {
+            try {
+              Stats stats = statsMap.get(factoryCls);
+              Histogram hist = stats.histogram;
+              AtomicLong runTime = stats.totalRuntime;
+              AtomicLong totalLatency = stats.totalLatency;
+              AtomicLong totalOps = stats.totalOps;
+              long startTime = System.currentTimeMillis();
+              HBaseRPCBenchmarkTool tool = new HBaseRPCBenchmarkTool
+                .Builder(factoryCls).withColumnFamily(family).withNumOps(numOps)
+                .withRow(row).withNumThreads(numThreads).withConf(conf)
+                .withQualifier(qual).withTableName(tblName).withValue(value)
+                .withDoPut(doPut).create();
+              tool.doWork();
+              hist.addValue(tool.getP95Latency());
+              runTime.addAndGet(System.currentTimeMillis() - startTime);
+              totalLatency.addAndGet((long)tool.getAverageLatency());
+              totalOps.addAndGet(tool.getTotalOps());
+            } catch (InterruptedException
+                | InstantiationException | IllegalAccessException e) {
+              LOG.debug("Cannot run the tool for factory : "
+                 + factoryCls.getName());
+              e.printStackTrace();
+            }
+          }
+        });
+      }
+      executor.shutdown();
+      executor.awaitTermination(1, TimeUnit.HOURS);
+    }
+    for (Entry<Class<? extends BenchmarkFactory>, Stats> entry :
+        statsMap.entrySet()) {
+      Stats s = entry.getValue();
+      System.out.println(entry.getKey().getName() +
+          " : stats for " + numRounds + " rounds, " + numOps +
+          " ops/thread, value length " + valueLength + ", " +
+          numThreads + " threads." +
+          " Average latency : " +
+          (s.totalLatency.get() / ((double) numRounds * 1000 * 1000)) +
+          " ms. Throughput : " +
+          ((s.totalOps.get() * 1000) / (double) s.totalRuntime.get()) +
+          " ops/s. p95 of p95 : " +
+          s.histogram.getPercentileEstimate(PercentileMetric.P95));
+    }
+  }
+
+  /**
+   * @param args
+   */
+  public static void main(String[] args) {
+    int ret = new HBaseRPCProtocolComparison().doStaticMain(args);
+    System.exit(ret);
+  }
+}

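A sketch of a main-style invocation of the comparison, mirroring the main()
method above and using only the option names registered in addOptions()
(single-dash option syntax assumed, per the commons-cli parsing in
AbstractHBaseTool; unspecified options fall back to their defaults):

    String[] args = {
        "-c", ThriftBenchmarkFactory.class.getName() + "," +
            HadoopRPCBenchmarkFactory.class.getName(),
        "-ops", "1000",
        "-rounds", "5",
        "-threads", "4"
    };
    System.exit(new HBaseRPCProtocolComparison().doStaticMain(args));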
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkClient.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkClient.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkClient.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+
+/**
+ * This is a BenchmarkClient which performs RPC operations through HadoopRPC.
+ */
+public class HadoopRPCBenchmarkClient implements BenchmarkClient {
+  private static final Log LOG =
+      LogFactory.getLog(HadoopRPCBenchmarkClient.class);
+  private HTable htable = null;
+
+  HadoopRPCBenchmarkClient(HTable htable) {
+    this.htable = htable;
+    this.htable.setAutoFlush(true);
+  }
+
+  // Performs a get through Hadoop RPC.
+  @Override
+  public Result executeGet(Get get) {
+    Result r = null;
+    try {
+      r = this.htable.get(get);
+    } catch (IOException e) {
+      LOG.debug("Unable to perform get");
+      e.printStackTrace();
+    }
+    return r;
+  }
+
+  // Performs a put through Hadoop RPC.
+  @Override
+  public void executePut(Put put) {
+    try {
+      this.htable.put(put);
+    } catch (IOException e) {
+      LOG.debug("Unable to perform put");
+      e.printStackTrace();
+    }
+  }
+
+  @Override
+  public Get createGet(byte[] row, byte[] family, byte[] qual) {
+    Get g = new Get(row);
+    g.addColumn(family, qual);
+    return g;
+  }
+
+  @Override
+  public Put createPut(byte[] row, byte[] family, byte[] qual, byte[] value) {
+    Put p = new Put(row);
+    p.add(family, qual, value);
+    return p;
+  }
+
+  @Override
+  public List<Result> executeScan(Scan scan) {
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public Scan createScan(byte[] row, byte[] family, int nbRows) {
+    throw new NotImplementedException();
+  }
+
+}

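The client above is meant to be obtained through its factory and exercised
via the BenchmarkClient interface. A minimal round trip, assuming the table
already exists and the configuration points at a live cluster, would look
like:

    Configuration conf = HBaseConfiguration.create();
    BenchmarkClient client = new HadoopRPCBenchmarkFactory()
        .makeBenchmarkClient(Bytes.toBytes("RPCBenchmarkingTable"), conf);
    byte[] row = Bytes.toBytes("rowkey");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    byte[] v = Bytes.toBytes("v");
    // Write the cell, read it back, and verify the value survived the trip.
    client.executePut(client.createPut(row, cf, q, v));
    Result r = client.executeGet(client.createGet(row, cf, q));
    assert Bytes.equals(r.getValue(cf, q), v);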
Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkFactory.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkFactory.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkFactory.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkFactory.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTable;
+
+/**
+ * BenchmarkFactory which creates a HadoopRPCBenchmarkClient to perform
+ * benchmarks over Hadoop RPC.
+ */
+public class HadoopRPCBenchmarkFactory implements BenchmarkFactory {
+
+  private static final Log LOG =
+      LogFactory.getLog(HadoopRPCBenchmarkFactory.class);
+
+  @Override
+  public BenchmarkClient makeBenchmarkClient(byte[] table, Configuration conf) {
+    HTable htable;
+    try {
+      Configuration c = HBaseConfiguration.create(conf);
+      c.setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT, false);
+      htable = new HTable(c, table);
+    } catch (IOException e) {
+      LOG.debug("Unabe to create an HTable client please" +
+          "check the error trace for signs of problems.");
+      e.printStackTrace();
+      return null;
+    }
+    return new HadoopRPCBenchmarkClient(htable);
+  }
+
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkClient.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkClient.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkClient.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+
+/**
+ * Implements the BenchmarkClient interface and provides functions to perform
+ * gets and puts over the Thrift transport.
+ */
+public class ThriftBenchmarkClient implements BenchmarkClient {
+  private static final Log LOG = LogFactory.getLog(ThriftBenchmarkClient.class);
+  private HTable htable = null;
+
+  ThriftBenchmarkClient(HTable htable) {
+    this.htable = htable;
+    this.htable.setAutoFlush(true);
+  }
+
+  @Override
+  public Result executeGet(Get get) {
+    Result r = null;
+    try {
+      r = this.htable.get(get);
+    } catch (IOException e) {
+      LOG.debug("Unable to perform get");
+      e.printStackTrace();
+    }
+    return r;
+  }
+
+  @Override
+  public void executePut(Put put) {
+    try {
+      this.htable.put(put);
+    } catch (IOException e) {
+      LOG.debug("Unable to perform put");
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * TODO: make use of qual or get rid of it
+   */
+  @Override
+  public Get createGet(byte[] row, byte[] family, byte[] qual) {
+    return new Get.Builder(row).addFamily(family).create();
+  }
+
+  @Override
+  public Put createPut(byte[] row, byte[] family, byte[] qual, byte[] value) {
+    Put p = new Put(row);
+    p.add(family, qual, value);
+    return p;
+  }
+
+  @Override
+  public List<Result> executeScan(Scan scan) {
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public Scan createScan(byte[] row, byte[] family, int nbRows) {
+    throw new NotImplementedException();
+  }
+
+}

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkFactory.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkFactory.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkFactory.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkFactory.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util.rpcbench;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTable;
+
+/**
+ * Factory class which creates a ThriftBenchmarkClient that performs
+ * simple operations via Thrift.
+ */
+public class ThriftBenchmarkFactory implements BenchmarkFactory {
+  private static final Log LOG = LogFactory.getLog(ThriftBenchmarkFactory.class);
+
+  @Override
+  public BenchmarkClient makeBenchmarkClient(byte[] table, Configuration conf) {
+    HTable htable;
+    try {
+      Configuration c = HBaseConfiguration.create(conf);
+      htable = new HTable(c, table);
+    } catch (IOException e) {
+      LOG.debug("Unabe to create an HTable client please" +
+          "check the error trace for signs of problems.");
+      e.printStackTrace();
+      return null;
+    }
+    return new ThriftBenchmarkClient(htable);
+  }
+}

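The only substantive difference between the two factories is the transport
flag: HadoopRPCBenchmarkFactory forces CLIENT_TO_RS_USE_THRIFT off on its
private copy of the configuration, while ThriftBenchmarkFactory inherits
whatever the shared configuration says. A comparison run therefore needs
Thrift enabled in the shared conf; roughly, and assuming a running cluster:

    Configuration conf = HBaseConfiguration.create();
    // Enable the Thrift client path; the Hadoop RPC factory overrides
    // this per client, so both transports get exercised.
    conf.setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT, true);
    byte[] table = Bytes.toBytes("RPCBenchmarkingTable");
    BenchmarkClient thrift = new ThriftBenchmarkFactory()
        .makeBenchmarkClient(table, conf);
    BenchmarkClient hadoop = new HadoopRPCBenchmarkFactory()
        .makeBenchmarkClient(table, conf);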
Modified: hbase/branches/0.89-fb/src/test/resources/hbase-site.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/resources/hbase-site.xml?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/resources/hbase-site.xml (original)
+++ hbase/branches/0.89-fb/src/test/resources/hbase-site.xml Wed Mar 12 21:17:13 2014
@@ -134,17 +134,17 @@
     The port at which the clients will connect.
     </description>
   </property>
-
+  
   <property>
     <name>hbase.server.thread.wakefrequency</name>
     <value>100</value>
   </property>
-
+  
   <property>
     <name>zookeeper.session.timeout</name>
     <value>60000</value>
   </property>
-
+  
   <property>
     <name>hbase.zookeeper.sessionExpired.abortProcess</name>
     <value>false</value>