You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2015/12/29 23:15:48 UTC
hbase git commit: HBASE-15031 Fix merge of MVCC and SequenceID
performance regression in branch-1.0; Addendum: forgot to git add test files
Repository: hbase
Updated Branches:
refs/heads/branch-1.0 f5628e5ab -> e0f019073
HBASE-15031 Fix merge of MVCC and SequenceID performance regression in branch-1.0; Addendum: forgot to git add test files
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e0f01907
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e0f01907
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e0f01907
Branch: refs/heads/branch-1.0
Commit: e0f0190733032a8bcd2bb14670a0f7dc840e4bc5
Parents: f5628e5
Author: stack <st...@apache.org>
Authored: Tue Dec 29 14:15:33 2015 -0800
Committer: stack <st...@apache.org>
Committed: Tue Dec 29 14:15:33 2015 -0800
----------------------------------------------------------------------
.../hadoop/hbase/IncrementPerformanceTest.java | 129 ++++++
...tIncrementFromClientSideWithCoprocessor.java | 49 +++
.../client/TestIncrementsFromClientSide.java | 433 +++++++++++++++++++
.../hbase/regionserver/TestRegionIncrement.java | 254 +++++++++++
4 files changed, 865 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e0f01907/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
new file mode 100644
index 0000000..bf3a44f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+// import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.MetricName;
+import com.yammer.metrics.core.Timer;
+import com.yammer.metrics.core.TimerContext;
+import com.yammer.metrics.stats.Snapshot;
+
+/**
+ * Simple Increments Performance Test. Run this from main. It is to go against a cluster.
+ * Presumption is the table exists already. Defaults are a zk ensemble of localhost:2181,
+ * a tableName of 'tableName', a column family name of 'columnFamilyName', with 80 threads by
+ * default and 10000 increments per thread. To change any of these configs, pass -DNAME=VALUE as
+ * in -DtableName="newTableName". It prints out configuration it is running with at the start and
+ * on the end it prints out percentiles.
+ */
+public class IncrementPerformanceTest implements Tool {
+ private static final Log LOG = LogFactory.getLog(IncrementPerformanceTest.class);
+ private static final byte [] QUALIFIER = new byte [] {'q'};
+ private Configuration conf;
+ private final MetricName metricName = new MetricName(this.getClass(), "increment");
+ private static final String TABLENAME = "tableName";
+ private static final String COLUMN_FAMILY = "columnFamilyName";
+ private static final String THREAD_COUNT = "threadCount";
+ private static final int DEFAULT_THREAD_COUNT = 80;
+ private static final String INCREMENT_COUNT = "incrementCount";
+ private static final int DEFAULT_INCREMENT_COUNT = 10000;
+
+ IncrementPerformanceTest() {}
+
+ public int run(final String [] args) throws Exception {
+ Configuration conf = getConf();
+ final TableName tableName = TableName.valueOf(conf.get(TABLENAME), TABLENAME);
+ final byte [] columnFamilyName = Bytes.toBytes(conf.get(COLUMN_FAMILY, COLUMN_FAMILY));
+ int threadCount = conf.getInt(THREAD_COUNT, DEFAULT_THREAD_COUNT);
+ final int incrementCount = conf.getInt(INCREMENT_COUNT, DEFAULT_INCREMENT_COUNT);
+ LOG.info("Running test with " + HConstants.ZOOKEEPER_QUORUM + "=" +
+ getConf().get(HConstants.ZOOKEEPER_QUORUM) + ", tableName=" + tableName +
+ ", columnFamilyName=" + columnFamilyName + ", threadCount=" + threadCount +
+ ", incrementCount=" + incrementCount);
+
+ ExecutorService service = Executors.newFixedThreadPool(threadCount);
+ Set<Future<?>> futures = new HashSet<Future<?>>();
+ final AtomicInteger integer = new AtomicInteger(0); // needed a simple "final" counter
+ while (integer.incrementAndGet() <= threadCount) {
+ futures.add(service.submit(new Runnable() {
+ @Override
+ public void run() {
+ HTable table;
+ try {
+ // ConnectionFactory.createConnection(conf).getTable(TableName.valueOf(TABLE_NAME));
+ table = new HTable(getConf(), tableName.getName());
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ Timer timer = Metrics.newTimer(metricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
+ for (int i = 0; i < incrementCount; i++) {
+ byte[] row = Bytes.toBytes(i);
+ TimerContext context = timer.time();
+ try {
+ table.incrementColumnValue(row, columnFamilyName, QUALIFIER, 1l);
+ } catch (IOException e) {
+ // swallow..it's a test.
+ } finally {
+ context.stop();
+ }
+ }
+ }
+ }));
+ }
+
+ for(Future<?> future : futures) future.get();
+ service.shutdown();
+ Snapshot s = Metrics.newTimer(this.metricName,
+ TimeUnit.MILLISECONDS, TimeUnit.SECONDS).getSnapshot();
+ LOG.info(String.format("75th=%s, 95th=%s, 99th=%s", s.get75thPercentile(),
+ s.get95thPercentile(), s.get99thPercentile()));
+ return 0;
+ }
+
+ @Override
+ public Configuration getConf() {
+ return this.conf;
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.exit(ToolRunner.run(HBaseConfiguration.create(), new IncrementPerformanceTest(), args));
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/e0f01907/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
new file mode 100644
index 0000000..a67cc45
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Before;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test all {@link Increment} client operations with a coprocessor that
+ * just implements the default flush/compact/scan policy.
+ *
+ * This test takes a long time. The test it derives from is parameterized so we run through both
+ * options of the test.
+ */
+@Category(LargeTests.class)
+public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFromClientSide {
+ public TestIncrementFromClientSideWithCoprocessor(final boolean fast) {
+ super(fast);
+ }
+
+ @Before
+ public void before() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
+ conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
+ super.before();
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/e0f01907/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
new file mode 100644
index 0000000..f9461bc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
@@ -0,0 +1,433 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Run Increment tests that use the HBase clients; {@link HTable}.
+ *
+ * Test is parameterized to run the slow and fast increment code paths. If fast, in the @before, we
+ * do a rolling restart of the single regionserver so that it can pick up the go fast configuration.
+ * Doing it this way should be faster than starting/stopping a cluster per test.
+ *
+ * Test takes a long time because we spin up a cluster between each run -- ugh.
+ */
+@RunWith(Parameterized.class)
+@Category(LargeTests.class)
+@SuppressWarnings ("deprecation")
+public class TestIncrementsFromClientSide {
+ final Log LOG = LogFactory.getLog(getClass());
+ protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static byte [] ROW = Bytes.toBytes("testRow");
+ private static byte [] FAMILY = Bytes.toBytes("testFamily");
+ // This test depends on there being only one slave running at at a time. See the @Before
+ // method where we do rolling restart.
+ protected static int SLAVES = 1;
+ private String oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY;
+ @Rule public TestName name = new TestName();
+ @Parameters(name = "fast={0}")
+ public static Collection<Object []> data() {
+ return Arrays.asList(new Object[] {Boolean.FALSE}, new Object [] {Boolean.TRUE});
+ }
+ private final boolean fast;
+
+ public TestIncrementsFromClientSide(final boolean fast) {
+ this.fast = fast;
+ }
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ MultiRowMutationEndpoint.class.getName());
+ conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
+ // We need more than one region server in this test
+ TEST_UTIL.startMiniCluster(SLAVES);
+ }
+
+ @Before
+ public void before() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ if (this.fast) {
+ // If fast is set, set our configuration and then do a rolling restart of the one
+ // regionserver so it picks up the new config. Doing this should be faster than starting
+ // and stopping a cluster for each test.
+ this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
+ conf.get(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
+ conf.setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
+ HRegionServer rs =
+ TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
+ TEST_UTIL.getHBaseCluster().startRegionServer();
+ rs.stop("Restart");
+ while(!rs.isStopped()) {
+ Threads.sleep(100);
+ LOG.info("Restarting " + rs);
+ }
+ TEST_UTIL.waitUntilNoRegionsInTransition(10000);
+ }
+ }
+
+ @After
+ public void after() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ if (this.fast) {
+ if (this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY != null) {
+ conf.set(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY,
+ this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
+ }
+ }
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void afterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testIncrementWithDeletes() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+ final byte[] COLUMN = Bytes.toBytes("column");
+
+ ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
+ TEST_UTIL.flush(TABLENAME);
+
+ Delete del = new Delete(ROW);
+ ht.delete(del);
+
+ ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
+
+ Get get = new Get(ROW);
+ if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
+ Result r = ht.get(get);
+ assertEquals(1, r.size());
+ assertEquals(5, Bytes.toLong(r.getValue(FAMILY, COLUMN)));
+ }
+
+ @Test
+ public void testIncrementingInvalidValue() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+ final byte[] COLUMN = Bytes.toBytes("column");
+ Put p = new Put(ROW);
+ // write an integer here (not a Long)
+ p.add(FAMILY, COLUMN, Bytes.toBytes(5));
+ ht.put(p);
+ try {
+ ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
+ fail("Should have thrown DoNotRetryIOException");
+ } catch (DoNotRetryIOException iox) {
+ // success
+ }
+ Increment inc = new Increment(ROW);
+ inc.addColumn(FAMILY, COLUMN, 5);
+ try {
+ ht.increment(inc);
+ fail("Should have thrown DoNotRetryIOException");
+ } catch (DoNotRetryIOException iox) {
+ // success
+ }
+ }
+
+ @Test
+ public void testIncrementInvalidArguments() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+ final byte[] COLUMN = Bytes.toBytes("column");
+ try {
+ // try null row
+ ht.incrementColumnValue(null, FAMILY, COLUMN, 5);
+ fail("Should have thrown IOException");
+ } catch (IOException iox) {
+ // success
+ }
+ try {
+ // try null family
+ ht.incrementColumnValue(ROW, null, COLUMN, 5);
+ fail("Should have thrown IOException");
+ } catch (IOException iox) {
+ // success
+ }
+ try {
+ // try null qualifier
+ ht.incrementColumnValue(ROW, FAMILY, null, 5);
+ fail("Should have thrown IOException");
+ } catch (IOException iox) {
+ // success
+ }
+ // try null row
+ try {
+ Increment incNoRow = new Increment((byte [])null);
+ incNoRow.addColumn(FAMILY, COLUMN, 5);
+ fail("Should have thrown IllegalArgumentException");
+ } catch (IllegalArgumentException iax) {
+ // success
+ } catch (NullPointerException npe) {
+ // success
+ }
+ // try null family
+ try {
+ Increment incNoFamily = new Increment(ROW);
+ incNoFamily.addColumn(null, COLUMN, 5);
+ fail("Should have thrown IllegalArgumentException");
+ } catch (IllegalArgumentException iax) {
+ // success
+ }
+ // try null qualifier
+ try {
+ Increment incNoQualifier = new Increment(ROW);
+ incNoQualifier.addColumn(FAMILY, null, 5);
+ fail("Should have thrown IllegalArgumentException");
+ } catch (IllegalArgumentException iax) {
+ // success
+ }
+ }
+
+ @Test
+ public void testIncrementOutOfOrder() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+ byte [][] QUALIFIERS = new byte [][] {
+ Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C")
+ };
+
+ Increment inc = new Increment(ROW);
+ for (int i=0; i<QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify expected results
+ Get get = new Get(ROW);
+ if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
+ Result r = ht.get(get);
+ Cell [] kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[1], 1);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[0], 1);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1);
+
+ // Now try multiple columns again
+ inc = new Increment(ROW);
+ for (int i=0; i<QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[1], 2);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[0], 2);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
+ }
+
+ @Test
+ public void testIncrementOnSameColumn() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final byte[] TABLENAME = Bytes.toBytes(filterStringSoTableNameSafe(this.name.getMethodName()));
+ HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+ byte[][] QUALIFIERS =
+ new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
+
+ Increment inc = new Increment(ROW);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify expected results
+ Get get = new Get(ROW);
+ if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
+ Result r = ht.get(get);
+ Cell[] kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 1);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1);
+
+ // Now try multiple columns again
+ inc = new Increment(ROW);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 2);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 2);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
+
+ ht.close();
+ }
+
+ @Test
+ public void testIncrement() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+ byte [][] ROWS = new byte [][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"),
+ Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+ Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i")
+ };
+ byte [][] QUALIFIERS = new byte [][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"),
+ Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+ Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i")
+ };
+
+ // Do some simple single-column increments
+
+ // First with old API
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[0], 1);
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[1], 2);
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[2], 3);
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[3], 4);
+
+ // Now increment things incremented with old and do some new
+ Increment inc = new Increment(ROW);
+ inc.addColumn(FAMILY, QUALIFIERS[1], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[3], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[4], 1);
+ ht.increment(inc);
+
+ // Verify expected results
+ Get get = new Get(ROW);
+ if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
+ Result r = ht.get(get);
+ Cell [] kvs = r.rawCells();
+ assertEquals(5, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 3);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 3);
+ assertIncrementKey(kvs[3], ROW, FAMILY, QUALIFIERS[3], 5);
+ assertIncrementKey(kvs[4], ROW, FAMILY, QUALIFIERS[4], 1);
+
+ // Now try multiple columns by different amounts
+ inc = new Increment(ROWS[0]);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], i+1);
+ }
+ ht.increment(inc);
+ // Verify
+ get = new Get(ROWS[0]);
+ if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(QUALIFIERS.length, kvs.length);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], i+1);
+ }
+
+ // Re-increment them
+ inc = new Increment(ROWS[0]);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], i+1);
+ }
+ ht.increment(inc);
+ // Verify
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(QUALIFIERS.length, kvs.length);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], 2*(i+1));
+ }
+
+ // Verify that an Increment of an amount of zero, returns current count; i.e. same as for above
+ // test, that is: 2 * (i + 1).
+ inc = new Increment(ROWS[0]);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 0);
+ }
+ ht.increment(inc);
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(QUALIFIERS.length, kvs.length);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], 2*(i+1));
+ }
+ }
+
+
+ /**
+ * Call over to the adjacent class's method of same name.
+ */
+ static void assertIncrementKey(Cell key, byte [] row, byte [] family,
+ byte [] qualifier, long value) throws Exception {
+ TestFromClientSide.assertIncrementKey(key, row, family, qualifier, value);
+ }
+
+ public static String filterStringSoTableNameSafe(final String str) {
+ return str.replaceAll("\\[fast\\=(.*)\\]", ".FAST.is.$1");
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/e0f01907/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
new file mode 100644
index 0000000..92285a8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TestIncrementsFromClientSide;
+import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+
+/**
+ * Increments with some concurrency against a region to ensure we get the right answer.
+ * Test is parameterized to run the fast and slow path increments; if fast,
+ * HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY is true.
+ *
+ * <p>There is similar test up in TestAtomicOperation. It does a test where it has 100 threads
+ * doing increments across two column families all on one row and the increments are connected to
+ * prove atomicity on row.
+ */
+@Category(MediumTests.class)
+@RunWith(Parameterized.class)
+public class TestRegionIncrement {
+ private static final Log LOG = LogFactory.getLog(TestRegionIncrement.class);
+ @Rule public TestName name = new TestName();
+ @Rule public final TestRule timeout =
+ CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+ withLookingForStuckThread(true).build();
+ private static HBaseTestingUtility TEST_UTIL;
+ private final static byte [] INCREMENT_BYTES = Bytes.toBytes("increment");
+ private static final int THREAD_COUNT = 10;
+ private static final int INCREMENT_COUNT = 10000;
+
+ @Parameters(name = "fast={0}")
+ public static Collection<Object []> data() {
+ return Arrays.asList(new Object[] {Boolean.FALSE}, new Object [] {Boolean.TRUE});
+ }
+
+ private final boolean fast;
+
+ public TestRegionIncrement(final boolean fast) {
+ this.fast = fast;
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+ if (this.fast) {
+ TEST_UTIL.getConfiguration().
+ setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
+ }
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ TEST_UTIL.cleanupTestDir();
+ }
+
+ private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
+ WAL wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
+ TEST_UTIL.getDataTestDir().toString(), conf);
+ return (HRegion)TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
+ HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
+ false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
+ }
+
+ private void closeRegion(final HRegion region) throws IOException {
+ region.close();
+ region.getWAL().close();
+ }
+
+ /**
+ * Increments a single cell a bunch of times.
+ */
+ private static class SingleCellIncrementer extends Thread {
+ private final int count;
+ private final HRegion region;
+ private final Increment increment;
+
+ SingleCellIncrementer(final int i, final int count, final HRegion region,
+ final Increment increment) {
+ super("" + i);
+ setDaemon(true);
+ this.count = count;
+ this.region = region;
+ this.increment = increment;
+ }
+
+ @Override
+ public void run() {
+ for (int i = 0; i < this.count; i++) {
+ try {
+ this.region.increment(this.increment);
+ // LOG.info(getName() + " " + i);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Increments a random row's Cell <code>count</code> times.
+ */
+ private static class CrossRowCellIncrementer extends Thread {
+ private final int count;
+ private final HRegion region;
+ private final Increment [] increments;
+
+ CrossRowCellIncrementer(final int i, final int count, final HRegion region, final int range) {
+ super("" + i);
+ setDaemon(true);
+ this.count = count;
+ this.region = region;
+ this.increments = new Increment[range];
+ for (int ii = 0; ii < range; ii++) {
+ this.increments[ii] = new Increment(Bytes.toBytes(i));
+ this.increments[ii].addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
+ }
+ }
+
+ @Override
+ public void run() {
+ for (int i = 0; i < this.count; i++) {
+ try {
+ int index = ThreadLocalRandom.current().nextInt(0, this.increments.length);
+ this.region.increment(this.increments[index]);
+ // LOG.info(getName() + " " + index);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Have each thread update its own Cell. Avoid contention with another thread.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testUnContendedSingleCellIncrement()
+ throws IOException, InterruptedException {
+ final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
+ TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
+ long startTime = System.currentTimeMillis();
+ try {
+ SingleCellIncrementer [] threads = new SingleCellIncrementer[THREAD_COUNT];
+ for (int i = 0; i < threads.length; i++) {
+ byte [] rowBytes = Bytes.toBytes(i);
+ Increment increment = new Increment(rowBytes);
+ increment.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
+ threads[i] = new SingleCellIncrementer(i, INCREMENT_COUNT, region, increment);
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ RegionScanner regionScanner = region.getScanner(new Scan());
+ List<Cell> cells = new ArrayList<Cell>(THREAD_COUNT);
+ while(regionScanner.next(cells)) continue;
+ assertEquals(THREAD_COUNT, cells.size());
+ long total = 0;
+ for (Cell cell: cells) total += Bytes.toLong(cell.getValue());
+ assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
+ } finally {
+ closeRegion(region);
+ LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
+ }
+ }
+
+ /**
+ * Have each thread update its own Cell. Avoid contention with another thread.
+ * This is
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testContendedAcrossCellsIncrement()
+ throws IOException, InterruptedException {
+ final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
+ TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
+ long startTime = System.currentTimeMillis();
+ try {
+ CrossRowCellIncrementer [] threads = new CrossRowCellIncrementer[THREAD_COUNT];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new CrossRowCellIncrementer(i, INCREMENT_COUNT, region, THREAD_COUNT);
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ RegionScanner regionScanner = region.getScanner(new Scan());
+ List<Cell> cells = new ArrayList<Cell>(100);
+ while(regionScanner.next(cells)) continue;
+ assertEquals(THREAD_COUNT, cells.size());
+ long total = 0;
+ for (Cell cell: cells) total += Bytes.toLong(cell.getValue());
+ assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
+ } finally {
+ closeRegion(region);
+ LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
+ }
+ }
+}
\ No newline at end of file