Posted to commits@hbase.apache.org by st...@apache.org on 2016/02/02 20:27:11 UTC
hbase git commit: HBASE-15196 HBASE-15158 Preamble 2 of 2: Add Increment tests
Repository: hbase
Updated Branches:
refs/heads/master 243e6cc52 -> ed46591f3
HBASE-15196 HBASE-15158 Preamble 2 of 2: Add Increment tests
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ed46591f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ed46591f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ed46591f
Branch: refs/heads/master
Commit: ed46591f30d4d66e89bc8157fc866c2aad77ce2c
Parents: 243e6cc
Author: stack <st...@apache.org>
Authored: Tue Feb 2 11:27:03 2016 -0800
Committer: stack <st...@apache.org>
Committed: Tue Feb 2 11:27:03 2016 -0800
----------------------------------------------------------------------
.../hadoop/hbase/client/TestIncrement.java | 2 +-
.../main/java/org/apache/hadoop/hbase/Tag.java | 2 +-
.../hadoop/hbase/regionserver/HRegion.java | 2 +-
.../MultiVersionConcurrencyControl.java | 5 +-
.../hadoop/hbase/regionserver/Region.java | 2 +-
.../hadoop/hbase/regionserver/wal/FSHLog.java | 11 +-
.../hadoop/hbase/regionserver/wal/HLogKey.java | 2 +-
.../hbase/regionserver/wal/ReplayHLogKey.java | 5 +-
.../hadoop/hbase/IncrementPerformanceTest.java | 128 +++++++
.../hadoop/hbase/client/TestFromClientSide.java | 264 +------------
.../hbase/client/TestFromClientSide3.java | 1 -
.../TestFromClientSideWithCoprocessor.java | 2 +-
...tIncrementFromClientSideWithCoprocessor.java | 44 +++
.../client/TestIncrementsFromClientSide.java | 373 +++++++++++++++++++
.../hbase/regionserver/TestAtomicOperation.java | 42 ++-
.../hadoop/hbase/regionserver/TestHRegion.java | 8 +-
.../regionserver/TestHRegionReplayEvents.java | 35 +-
.../hbase/regionserver/TestRegionIncrement.java | 245 ++++++++++++
.../regionserver/TestRegionReplicaFailover.java | 1 +
.../hadoop/hbase/regionserver/TestTags.java | 2 +-
.../hbase/regionserver/wal/TestFSHLog.java | 4 +-
.../hbase/regionserver/wal/TestWALReplay.java | 2 +-
.../replication/TestReplicationSmallTests.java | 6 +-
...egionReplicaReplicationEndpointNoMaster.java | 1 -
24 files changed, 865 insertions(+), 324 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java
index 4b9f113..c38340d 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java
@@ -30,7 +30,7 @@ import org.junit.experimental.categories.Category;
@Category({ClientTests.class, SmallTests.class})
public class TestIncrement {
@Test
- public void test() {
+ public void testIncrementInstance() {
final long expected = 13;
Increment inc = new Increment(new byte [] {'r'});
int total = 0;
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
index 1d55baa..c6698f5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
@@ -75,4 +75,4 @@ public interface Tag {
* @return The {@link java.nio.ByteBuffer} containing the value bytes.
*/
ByteBuffer getValueByteBuffer();
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c93123c..3cf4122 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1275,7 +1275,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
@Override
public long getReadPoint(IsolationLevel isolationLevel) {
- if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+ if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
// This scan can read even uncommitted transactions
return Long.MAX_VALUE;
}
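The branch above is the server side of a read-uncommitted get/scan: when the caller asks for IsolationLevel.READ_UNCOMMITTED, the region answers with a read point of Long.MAX_VALUE instead of the current MVCC read point. A minimal client-side sketch of how that path is reached, mirroring the assertICV helper this patch adds to TestAtomicOperation; row, family and qualifier stand in for whatever the caller uses:

    Get get = new Get(row);
    // READ_UNCOMMITTED makes Region#getReadPoint return Long.MAX_VALUE, so the read
    // does not wait on still-outstanding MVCC writes.
    get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    get.addColumn(family, qualifier);
    Result result = region.get(get);
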
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index eba99e0..bf295e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -155,7 +155,7 @@ public class MultiVersionConcurrencyControl {
* changes completely) so we can clean up the outstanding transaction.
*
* How much is the read point advanced?
- *
+ *
* Let S be the set of all write numbers that are completed. Set the read point to the highest
* numbered write of S.
*
@@ -166,7 +166,6 @@ public class MultiVersionConcurrencyControl {
public boolean complete(WriteEntry writeEntry) {
synchronized (writeQueue) {
writeEntry.markCompleted();
-
long nextReadValue = NONE;
boolean ranOnce = false;
while (!writeQueue.isEmpty()) {
@@ -279,4 +278,4 @@ public class MultiVersionConcurrencyControl {
ClassSize.OBJECT +
2 * Bytes.SIZEOF_LONG +
2 * ClassSize.REFERENCE);
-}
\ No newline at end of file
+}
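The javadoc touched above describes the read-point rule that complete(WriteEntry) enforces: the read point advances to the highest completed write number. A minimal sketch of that rule, assuming the usual drain-the-completed-prefix behavior of the write queue; the WriteEntry accessors here are stand-ins for the real inner class:

    import java.util.Deque;

    final class ReadPointSketch {
      interface WriteEntry {            // stand-in for MultiVersionConcurrencyControl.WriteEntry
        boolean isCompleted();
        long getWriteNumber();
      }

      static long advanceReadPoint(Deque<WriteEntry> writeQueue, long currentReadPoint) {
        long nextReadValue = currentReadPoint;
        // Readers may only be shown a contiguous prefix of completed writes, so stop at the
        // first entry that is still outstanding.
        while (!writeQueue.isEmpty() && writeQueue.peekFirst().isCompleted()) {
          nextReadValue = writeQueue.pollFirst().getWriteNumber();
        }
        return nextReadValue;
      }
    }
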
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index c0bc8fe..213b41a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -156,7 +156,7 @@ public interface Region extends ConfigurationObserver {
/** @return true if loading column families on demand by default */
boolean isLoadingCfsOnDemandDefault();
- /** @return readpoint considering given IsolationLevel */
+ /** @return readpoint considering given IsolationLevel; pass null for default*/
long getReadPoint(IsolationLevel isolationLevel);
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 47e28b3..09da8fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -1,5 +1,4 @@
/**
-
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -508,16 +507,16 @@ public class FSHLog implements WAL {
FSUtils.getDefaultBlockSize(this.fs, this.fullPathLogDir));
this.logrollsize =
(long)(blocksize * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f));
-
+
float memstoreRatio = conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY,
- conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_OLD_KEY,
+ conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_OLD_KEY,
HeapMemorySizeUtil.DEFAULT_MEMSTORE_SIZE));
boolean maxLogsDefined = conf.get("hbase.regionserver.maxlogs") != null;
if(maxLogsDefined){
LOG.warn("'hbase.regionserver.maxlogs' was deprecated.");
}
- this.maxLogs = conf.getInt("hbase.regionserver.maxlogs",
- Math.max(32, calculateMaxLogFiles(memstoreRatio, logrollsize)));
+ this.maxLogs = conf.getInt("hbase.regionserver.maxlogs",
+ Math.max(32, calculateMaxLogFiles(memstoreRatio, logrollsize)));
this.minTolerableReplication = conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication",
FSUtils.getDefaultReplication(fs, this.fullPathLogDir));
this.lowReplicationRollLimit =
@@ -572,7 +571,7 @@ public class FSHLog implements WAL {
int maxLogs = Math.round(mu.getMax() * memstoreSizeRatio * 2 / logRollSize);
return maxLogs;
}
-
+
/**
* Get the backing files associated with this WAL.
* @return may be null if there are no files.
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
index 3e548ad..c094ced 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
*
* <p>Some Transactional edits (START, COMMIT, ABORT) will not have an
* associated row.
- * @deprecated use WALKey. as of 2.0. Remove in 3.0
+ * @deprecated use WALKey. Deprecated as of 1.0 (HBASE-12522). Remove in 2.0
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
@Deprecated
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
index f7ae208..d5a1561 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.regionserver.wal;
-import java.io.IOException;
import java.util.List;
import java.util.UUID;
@@ -49,7 +48,7 @@ public class ReplayHLogKey extends HLogKey {
* @return long the new assigned sequence number
*/
@Override
- public long getSequenceId() throws IOException {
+ public long getSequenceId() {
return this.getOrigLogSeqNum();
}
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
new file mode 100644
index 0000000..aed3d0a
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
+
+
+/**
+ * Simple Increments Performance Test. Run this from main. It is to go against a cluster.
+ * Presumption is the table exists already. Defaults are a zk ensemble of localhost:2181,
+ * a tableName of 'tableName', a column family name of 'columnFamilyName', with 80 threads by
+ * default and 10000 increments per thread. To change any of these configs, pass -DNAME=VALUE as
+ * in -DtableName="newTableName". It prints out the configuration it is running with at the
+ * start, and at the end it prints out percentiles.
+ */
+public class IncrementPerformanceTest implements Tool {
+ private static final Log LOG = LogFactory.getLog(IncrementPerformanceTest.class);
+ private static final byte [] QUALIFIER = new byte [] {'q'};
+ private Configuration conf;
+ private final MetricRegistry metrics = new MetricRegistry();
+ private static final String TABLENAME = "tableName";
+ private static final String COLUMN_FAMILY = "columnFamilyName";
+ private static final String THREAD_COUNT = "threadCount";
+ private static final int DEFAULT_THREAD_COUNT = 80;
+ private static final String INCREMENT_COUNT = "incrementCount";
+ private static final int DEFAULT_INCREMENT_COUNT = 10000;
+
+ IncrementPerformanceTest() {}
+
+ public int run(final String [] args) throws Exception {
+ Configuration conf = getConf();
+ final TableName tableName = TableName.valueOf(conf.get(TABLENAME), TABLENAME);
+ final byte [] columnFamilyName = Bytes.toBytes(conf.get(COLUMN_FAMILY, COLUMN_FAMILY));
+ int threadCount = conf.getInt(THREAD_COUNT, DEFAULT_THREAD_COUNT);
+ final int incrementCount = conf.getInt(INCREMENT_COUNT, DEFAULT_INCREMENT_COUNT);
+ LOG.info("Running test with " + HConstants.ZOOKEEPER_QUORUM + "=" +
+ getConf().get(HConstants.ZOOKEEPER_QUORUM) + ", tableName=" + tableName +
+ ", columnFamilyName=" + columnFamilyName + ", threadCount=" + threadCount +
+ ", incrementCount=" + incrementCount);
+
+ ExecutorService service = Executors.newFixedThreadPool(threadCount);
+ Set<Future<?>> futures = new HashSet<Future<?>>();
+ final AtomicInteger integer = new AtomicInteger(0); // needed a simple "final" counter
+ while (integer.incrementAndGet() <= threadCount) {
+ futures.add(service.submit(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ try (Connection connection = ConnectionFactory.createConnection(getConf())) {
+ try (Table table = connection.getTable(tableName)) {
+ Timer timer = metrics.timer("increments");
+ for (int i = 0; i < incrementCount; i++) {
+ byte[] row = Bytes.toBytes(i);
+ Timer.Context context = timer.time();
+ try {
+ table.incrementColumnValue(row, columnFamilyName, QUALIFIER, 1l);
+ } catch (IOException e) {
+ // swallow..it's a test.
+ } finally {
+ context.stop();
+ }
+ }
+ }
+ }
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
+ }
+ }
+ }));
+ }
+
+ for(Future<?> future : futures) future.get();
+ service.shutdown();
+ Snapshot s = metrics.timer("increments").getSnapshot();
+ LOG.info(String.format("75th=%s, 95th=%s, 99th=%s", s.get75thPercentile(),
+ s.get95thPercentile(), s.get99thPercentile()));
+ return 0;
+ }
+
+ @Override
+ public Configuration getConf() {
+ return this.conf;
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.exit(ToolRunner.run(HBaseConfiguration.create(), new IncrementPerformanceTest(), args));
+ }
+}
\ No newline at end of file
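The class comment above documents the knobs (tableName, columnFamilyName, threadCount, incrementCount) that run() reads from the Configuration, normally supplied as -DNAME=VALUE. A minimal sketch of driving the tool programmatically instead of via its main(); the table name 'counters' and family 'f' are made-up values and the table must already exist on the target cluster:

    package org.apache.hadoop.hbase;   // same package, since the constructor is package-private

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ToolRunner;

    public class RunIncrementPerfTest {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("tableName", "counters");       // hypothetical table; create it beforehand
        conf.set("columnFamilyName", "f");       // hypothetical column family
        conf.setInt("threadCount", 16);          // default is 80
        conf.setInt("incrementCount", 1000);     // default is 10000 increments per thread
        System.exit(ToolRunner.run(conf, new IncrementPerformanceTest(), args));
      }
    }
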
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 8734aea..63d9cd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -48,7 +48,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -119,6 +118,7 @@ import org.junit.experimental.categories.Category;
@Category({LargeTests.class, ClientTests.class})
@SuppressWarnings ("deprecation")
public class TestFromClientSide {
+ // NOTE: Increment tests were moved to their own class, TestIncrementsFromClientSide.
private static final Log LOG = LogFactory.getLog(TestFromClientSide.class);
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static byte [] ROW = Bytes.toBytes("testRow");
@@ -3046,7 +3046,7 @@ public class TestFromClientSide {
equals(value, CellUtil.cloneValue(key)));
}
- private void assertIncrementKey(Cell key, byte [] row, byte [] family,
+ static void assertIncrementKey(Cell key, byte [] row, byte [] family,
byte [] qualifier, long value)
throws Exception {
assertTrue("Expected row [" + Bytes.toString(row) + "] " +
@@ -3270,7 +3270,7 @@ public class TestFromClientSide {
return stamps;
}
- private boolean equals(byte [] left, byte [] right) {
+ static boolean equals(byte [] left, byte [] right) {
if (left == null && right == null) return true;
if (left == null && right.length == 0) return true;
if (right == null && left.length == 0) return true;
@@ -4399,264 +4399,6 @@ public class TestFromClientSide {
}
@Test
- public void testIncrementWithDeletes() throws Exception {
- LOG.info("Starting testIncrementWithDeletes");
- final TableName TABLENAME =
- TableName.valueOf("testIncrementWithDeletes");
- Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
- final byte[] COLUMN = Bytes.toBytes("column");
-
- ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
- TEST_UTIL.flush(TABLENAME);
-
- Delete del = new Delete(ROW);
- ht.delete(del);
-
- ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
-
- Get get = new Get(ROW);
- Result r = ht.get(get);
- assertEquals(1, r.size());
- assertEquals(5, Bytes.toLong(r.getValue(FAMILY, COLUMN)));
- }
-
- @Test
- public void testIncrementingInvalidValue() throws Exception {
- LOG.info("Starting testIncrementingInvalidValue");
- final TableName TABLENAME = TableName.valueOf("testIncrementingInvalidValue");
- Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
- final byte[] COLUMN = Bytes.toBytes("column");
- Put p = new Put(ROW);
- // write an integer here (not a Long)
- p.addColumn(FAMILY, COLUMN, Bytes.toBytes(5));
- ht.put(p);
- try {
- ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
- fail("Should have thrown DoNotRetryIOException");
- } catch (DoNotRetryIOException iox) {
- // success
- }
- Increment inc = new Increment(ROW);
- inc.addColumn(FAMILY, COLUMN, 5);
- try {
- ht.increment(inc);
- fail("Should have thrown DoNotRetryIOException");
- } catch (DoNotRetryIOException iox) {
- // success
- }
- }
-
- @Test
- public void testIncrementInvalidArguments() throws Exception {
- LOG.info("Starting testIncrementInvalidArguments");
- final TableName TABLENAME = TableName.valueOf("testIncrementInvalidArguments");
- Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
- final byte[] COLUMN = Bytes.toBytes("column");
- try {
- // try null row
- ht.incrementColumnValue(null, FAMILY, COLUMN, 5);
- fail("Should have thrown IOException");
- } catch (IOException iox) {
- // success
- }
- try {
- // try null family
- ht.incrementColumnValue(ROW, null, COLUMN, 5);
- fail("Should have thrown IOException");
- } catch (IOException iox) {
- // success
- }
- try {
- // try null qualifier
- ht.incrementColumnValue(ROW, FAMILY, null, 5);
- fail("Should have thrown IOException");
- } catch (IOException iox) {
- // success
- }
- // try null row
- try {
- Increment incNoRow = new Increment((byte [])null);
- incNoRow.addColumn(FAMILY, COLUMN, 5);
- fail("Should have thrown IllegalArgumentException");
- } catch (IllegalArgumentException iax) {
- // success
- } catch (NullPointerException npe) {
- // success
- }
- // try null family
- try {
- Increment incNoFamily = new Increment(ROW);
- incNoFamily.addColumn(null, COLUMN, 5);
- fail("Should have thrown IllegalArgumentException");
- } catch (IllegalArgumentException iax) {
- // success
- }
- // try null qualifier
- try {
- Increment incNoQualifier = new Increment(ROW);
- incNoQualifier.addColumn(FAMILY, null, 5);
- fail("Should have thrown IllegalArgumentException");
- } catch (IllegalArgumentException iax) {
- // success
- }
- }
-
- @Test
- public void testIncrementOutOfOrder() throws Exception {
- LOG.info("Starting testIncrementOutOfOrder");
- final TableName TABLENAME = TableName.valueOf("testIncrementOutOfOrder");
- Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
-
- byte [][] QUALIFIERS = new byte [][] {
- Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C")
- };
-
- Increment inc = new Increment(ROW);
- for (int i=0; i<QUALIFIERS.length; i++) {
- inc.addColumn(FAMILY, QUALIFIERS[i], 1);
- }
- ht.increment(inc);
-
- // Verify expected results
- Result r = ht.get(new Get(ROW));
- Cell [] kvs = r.rawCells();
- assertEquals(3, kvs.length);
- assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[1], 1);
- assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[0], 1);
- assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1);
-
- // Now try multiple columns again
- inc = new Increment(ROW);
- for (int i=0; i<QUALIFIERS.length; i++) {
- inc.addColumn(FAMILY, QUALIFIERS[i], 1);
- }
- ht.increment(inc);
-
- // Verify
- r = ht.get(new Get(ROW));
- kvs = r.rawCells();
- assertEquals(3, kvs.length);
- assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[1], 2);
- assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[0], 2);
- assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
- }
-
- @Test
- public void testIncrementOnSameColumn() throws Exception {
- LOG.info("Starting testIncrementOnSameColumn");
- final TableName TABLENAME = TableName.valueOf("testIncrementOnSameColumn");
- Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
-
- byte[][] QUALIFIERS =
- new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
-
- Increment inc = new Increment(ROW);
- for (int i = 0; i < QUALIFIERS.length; i++) {
- inc.addColumn(FAMILY, QUALIFIERS[i], 1);
- inc.addColumn(FAMILY, QUALIFIERS[i], 1);
- }
- ht.increment(inc);
-
- // Verify expected results
- Result r = ht.get(new Get(ROW));
- Cell[] kvs = r.rawCells();
- assertEquals(3, kvs.length);
- assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1);
- assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 1);
- assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1);
-
- // Now try multiple columns again
- inc = new Increment(ROW);
- for (int i = 0; i < QUALIFIERS.length; i++) {
- inc.addColumn(FAMILY, QUALIFIERS[i], 1);
- inc.addColumn(FAMILY, QUALIFIERS[i], 1);
- }
- ht.increment(inc);
-
- // Verify
- r = ht.get(new Get(ROW));
- kvs = r.rawCells();
- assertEquals(3, kvs.length);
- assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 2);
- assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 2);
- assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
-
- ht.close();
- }
-
- @Test
- public void testIncrement() throws Exception {
- LOG.info("Starting testIncrement");
- final TableName TABLENAME = TableName.valueOf("testIncrement");
- Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
-
- byte [][] ROWS = new byte [][] {
- Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"),
- Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
- Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i")
- };
- byte [][] QUALIFIERS = new byte [][] {
- Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"),
- Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
- Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i")
- };
-
- // Do some simple single-column increments
-
- // First with old API
- ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[0], 1);
- ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[1], 2);
- ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[2], 3);
- ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[3], 4);
-
- // Now increment things incremented with old and do some new
- Increment inc = new Increment(ROW);
- inc.addColumn(FAMILY, QUALIFIERS[1], 1);
- inc.addColumn(FAMILY, QUALIFIERS[3], 1);
- inc.addColumn(FAMILY, QUALIFIERS[4], 1);
- ht.increment(inc);
-
- // Verify expected results
- Result r = ht.get(new Get(ROW));
- Cell [] kvs = r.rawCells();
- assertEquals(5, kvs.length);
- assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1);
- assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 3);
- assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 3);
- assertIncrementKey(kvs[3], ROW, FAMILY, QUALIFIERS[3], 5);
- assertIncrementKey(kvs[4], ROW, FAMILY, QUALIFIERS[4], 1);
-
- // Now try multiple columns by different amounts
- inc = new Increment(ROWS[0]);
- for (int i=0;i<QUALIFIERS.length;i++) {
- inc.addColumn(FAMILY, QUALIFIERS[i], i+1);
- }
- ht.increment(inc);
- // Verify
- r = ht.get(new Get(ROWS[0]));
- kvs = r.rawCells();
- assertEquals(QUALIFIERS.length, kvs.length);
- for (int i=0;i<QUALIFIERS.length;i++) {
- assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], i+1);
- }
-
- // Re-increment them
- inc = new Increment(ROWS[0]);
- for (int i=0;i<QUALIFIERS.length;i++) {
- inc.addColumn(FAMILY, QUALIFIERS[i], i+1);
- }
- ht.increment(inc);
- // Verify
- r = ht.get(new Get(ROWS[0]));
- kvs = r.rawCells();
- assertEquals(QUALIFIERS.length, kvs.length);
- for (int i=0;i<QUALIFIERS.length;i++) {
- assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], 2*(i+1));
- }
- }
-
-
- @Test
public void testClientPoolRoundRobin() throws IOException {
final TableName tableName = TableName.valueOf("testClientPoolRoundRobin");
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index c451cf0..7194c57 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.util.ArrayList;
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
index e832590..f313500 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java
@@ -28,7 +28,7 @@ import org.junit.experimental.categories.Category;
/**
* Test all client operations with a coprocessor that
- * just implements the default flush/compact/scan policy
+ * just implements the default flush/compact/scan policy.
*/
@Category({LargeTests.class, ClientTests.class})
public class TestFromClientSideWithCoprocessor extends TestFromClientSide {
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
new file mode 100644
index 0000000..c9bc7c2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Before;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test all {@link Increment} client operations with a coprocessor that
+ * just implements the default flush/compact/scan policy.
+ *
+ * This test takes a long time. The test it derives from is parameterized so we run through both
+ * options of the test.
+ */
+@Category(LargeTests.class)
+public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFromClientSide {
+ @Before
+ public void before() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
+ conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
new file mode 100644
index 0000000..6b4ee89
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java
@@ -0,0 +1,373 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Run Increment tests that use the HBase clients; {@link HTable}.
+ *
+ * Test is parameterized to run the slow and fast increment code paths. If fast, in the @before, we
+ * do a rolling restart of the single regionserver so that it can pick up the go fast configuration.
+ * Doing it this way should be faster than starting/stopping a cluster per test.
+ *
+ * Test takes a long time because we spin up a cluster between each run -- ugh.
+ */
+@Category(LargeTests.class)
+public class TestIncrementsFromClientSide {
+ final Log LOG = LogFactory.getLog(getClass());
+ protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static byte [] ROW = Bytes.toBytes("testRow");
+ private static byte [] FAMILY = Bytes.toBytes("testFamily");
+ // This test depends on there being only one slave running at a time. See the @Before
+ // method where we do rolling restart.
+ protected static int SLAVES = 1;
+ @Rule public TestName name = new TestName();
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+ MultiRowMutationEndpoint.class.getName());
+ conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
+ // We need more than one region server in this test
+ TEST_UTIL.startMiniCluster(SLAVES);
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void afterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testIncrementWithDeletes() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+ final byte[] COLUMN = Bytes.toBytes("column");
+
+ ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
+ TEST_UTIL.flush(TABLENAME);
+
+ Delete del = new Delete(ROW);
+ ht.delete(del);
+
+ ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
+
+ Get get = new Get(ROW);
+ Result r = ht.get(get);
+ assertEquals(1, r.size());
+ assertEquals(5, Bytes.toLong(r.getValue(FAMILY, COLUMN)));
+ }
+
+ @Test
+ public void testIncrementingInvalidValue() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+ final byte[] COLUMN = Bytes.toBytes("column");
+ Put p = new Put(ROW);
+ // write an integer here (not a Long)
+ p.addColumn(FAMILY, COLUMN, Bytes.toBytes(5));
+ ht.put(p);
+ try {
+ ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
+ fail("Should have thrown DoNotRetryIOException");
+ } catch (DoNotRetryIOException iox) {
+ // success
+ }
+ Increment inc = new Increment(ROW);
+ inc.addColumn(FAMILY, COLUMN, 5);
+ try {
+ ht.increment(inc);
+ fail("Should have thrown DoNotRetryIOException");
+ } catch (DoNotRetryIOException iox) {
+ // success
+ }
+ }
+
+ @Test
+ public void testIncrementInvalidArguments() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+ final byte[] COLUMN = Bytes.toBytes("column");
+ try {
+ // try null row
+ ht.incrementColumnValue(null, FAMILY, COLUMN, 5);
+ fail("Should have thrown IOException");
+ } catch (IOException iox) {
+ // success
+ }
+ try {
+ // try null family
+ ht.incrementColumnValue(ROW, null, COLUMN, 5);
+ fail("Should have thrown IOException");
+ } catch (IOException iox) {
+ // success
+ }
+ try {
+ // try null qualifier
+ ht.incrementColumnValue(ROW, FAMILY, null, 5);
+ fail("Should have thrown IOException");
+ } catch (IOException iox) {
+ // success
+ }
+ // try null row
+ try {
+ Increment incNoRow = new Increment((byte [])null);
+ incNoRow.addColumn(FAMILY, COLUMN, 5);
+ fail("Should have thrown IllegalArgumentException");
+ } catch (IllegalArgumentException iax) {
+ // success
+ } catch (NullPointerException npe) {
+ // success
+ }
+ // try null family
+ try {
+ Increment incNoFamily = new Increment(ROW);
+ incNoFamily.addColumn(null, COLUMN, 5);
+ fail("Should have thrown IllegalArgumentException");
+ } catch (IllegalArgumentException iax) {
+ // success
+ }
+ // try null qualifier
+ try {
+ Increment incNoQualifier = new Increment(ROW);
+ incNoQualifier.addColumn(FAMILY, null, 5);
+ fail("Should have thrown IllegalArgumentException");
+ } catch (IllegalArgumentException iax) {
+ // success
+ }
+ }
+
+ @Test
+ public void testIncrementOutOfOrder() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+ byte [][] QUALIFIERS = new byte [][] {
+ Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C")
+ };
+
+ Increment inc = new Increment(ROW);
+ for (int i=0; i<QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify expected results
+ Get get = new Get(ROW);
+ Result r = ht.get(get);
+ Cell [] kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[1], 1);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[0], 1);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1);
+
+ // Now try multiple columns again
+ inc = new Increment(ROW);
+ for (int i=0; i<QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[1], 2);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[0], 2);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
+ }
+
+ @Test
+ public void testIncrementOnSameColumn() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final byte[] TABLENAME = Bytes.toBytes(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLENAME), FAMILY);
+
+ byte[][] QUALIFIERS =
+ new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };
+
+ Increment inc = new Increment(ROW);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify expected results
+ Get get = new Get(ROW);
+ Result r = ht.get(get);
+ Cell[] kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 1);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1);
+
+ // Now try multiple columns again
+ inc = new Increment(ROW);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[i], 1);
+ }
+ ht.increment(inc);
+
+ // Verify
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(3, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 2);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 2);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2);
+
+ ht.close();
+ }
+
+ @Test
+ public void testIncrement() throws Exception {
+ LOG.info("Starting " + this.name.getMethodName());
+ final TableName TABLENAME =
+ TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+ Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+ byte [][] ROWS = new byte [][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"),
+ Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+ Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i")
+ };
+ byte [][] QUALIFIERS = new byte [][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"),
+ Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+ Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i")
+ };
+
+ // Do some simple single-column increments
+
+ // First with old API
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[0], 1);
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[1], 2);
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[2], 3);
+ ht.incrementColumnValue(ROW, FAMILY, QUALIFIERS[3], 4);
+
+ // Now increment things incremented with old and do some new
+ Increment inc = new Increment(ROW);
+ inc.addColumn(FAMILY, QUALIFIERS[1], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[3], 1);
+ inc.addColumn(FAMILY, QUALIFIERS[4], 1);
+ ht.increment(inc);
+
+ // Verify expected results
+ Get get = new Get(ROW);
+ Result r = ht.get(get);
+ Cell [] kvs = r.rawCells();
+ assertEquals(5, kvs.length);
+ assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1);
+ assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 3);
+ assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 3);
+ assertIncrementKey(kvs[3], ROW, FAMILY, QUALIFIERS[3], 5);
+ assertIncrementKey(kvs[4], ROW, FAMILY, QUALIFIERS[4], 1);
+
+ // Now try multiple columns by different amounts
+ inc = new Increment(ROWS[0]);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], i+1);
+ }
+ ht.increment(inc);
+ // Verify
+ get = new Get(ROWS[0]);
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(QUALIFIERS.length, kvs.length);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], i+1);
+ }
+
+ // Re-increment them
+ inc = new Increment(ROWS[0]);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], i+1);
+ }
+ ht.increment(inc);
+ // Verify
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(QUALIFIERS.length, kvs.length);
+ for (int i=0;i<QUALIFIERS.length;i++) {
+ assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], 2*(i+1));
+ }
+
+ // Verify that an Increment of an amount of zero, returns current count; i.e. same as for above
+ // test, that is: 2 * (i + 1).
+ inc = new Increment(ROWS[0]);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ inc.addColumn(FAMILY, QUALIFIERS[i], 0);
+ }
+ ht.increment(inc);
+ r = ht.get(get);
+ kvs = r.rawCells();
+ assertEquals(QUALIFIERS.length, kvs.length);
+ for (int i = 0; i < QUALIFIERS.length; i++) {
+ assertIncrementKey(kvs[i], ROWS[0], FAMILY, QUALIFIERS[i], 2*(i+1));
+ }
+ }
+
+
+ /**
+ * Call over to the adjacent class's method of same name.
+ */
+ static void assertIncrementKey(Cell key, byte [] row, byte [] family,
+ byte [] qualifier, long value) throws Exception {
+ TestFromClientSide.assertIncrementKey(key, row, family, qualifier, value);
+ }
+
+ public static String filterStringSoTableNameSafe(final String str) {
+ return str.replaceAll("\\[fast\\=(.*)\\]", ".FAST.is.$1");
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index d15a7f4..0626dce 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.regionserver;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -65,7 +66,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.junit.After;
import org.junit.Before;
@@ -183,12 +183,14 @@ public class TestAtomicOperation {
*/
@Test
public void testIncrementMultiThreads() throws IOException {
+ boolean fast = true;
LOG.info("Starting test testIncrementMultiThreads");
// run a with mixed column families (1 and 3 versions)
initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
- // create 25 threads, each will increment by its own quantity
- int numThreads = 25;
+ // Create 100 threads, each will increment by its own quantity. All 100 threads update the
+ // same row over two column families.
+ int numThreads = 100;
int incrementsPerThread = 1000;
Incrementer[] all = new Incrementer[numThreads];
int expectedTotal = 0;
@@ -211,9 +213,9 @@ public class TestAtomicOperation {
LOG.info("Ignored", e);
}
}
- assertICV(row, fam1, qual1, expectedTotal);
- assertICV(row, fam1, qual2, expectedTotal*2);
- assertICV(row, fam2, qual3, expectedTotal*3);
+ assertICV(row, fam1, qual1, expectedTotal, fast);
+ assertICV(row, fam1, qual2, expectedTotal*2, fast);
+ assertICV(row, fam2, qual3, expectedTotal*3, fast);
LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal);
}
@@ -221,9 +223,11 @@ public class TestAtomicOperation {
private void assertICV(byte [] row,
byte [] familiy,
byte[] qualifier,
- long amount) throws IOException {
+ long amount,
+ boolean fast) throws IOException {
// run a get and see?
Get get = new Get(row);
+ if (fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
get.addColumn(familiy, qualifier);
Result result = region.get(get);
assertEquals(1, result.size());
@@ -254,7 +258,8 @@ public class TestAtomicOperation {
}
/**
- * A thread that makes a few increment calls
+ * A thread that makes increment calls always on the same row, this.row against two column
+ * families on this row.
*/
public static class Incrementer extends Thread {
@@ -263,9 +268,8 @@ public class TestAtomicOperation {
private final int amount;
- public Incrementer(Region region,
- int threadNumber, int amount, int numIncrements) {
- super("incrementer." + threadNumber);
+ public Incrementer(Region region, int threadNumber, int amount, int numIncrements) {
+ super("Incrementer." + threadNumber);
this.region = region;
this.numIncrements = numIncrements;
this.amount = amount;
@@ -281,19 +285,19 @@ public class TestAtomicOperation {
inc.addColumn(fam1, qual2, amount*2);
inc.addColumn(fam2, qual3, amount*3);
inc.setDurability(Durability.ASYNC_WAL);
- region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
-
- // verify: Make sure we only see completed increments
- Get g = new Get(row);
- Result result = region.get(g);
+ Result result = region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
if (result != null) {
- assertTrue(result.getValue(fam1, qual1) != null);
- assertTrue(result.getValue(fam1, qual2) != null);
assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2,
Bytes.toLong(result.getValue(fam1, qual2)));
assertTrue(result.getValue(fam2, qual3) != null);
assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3,
Bytes.toLong(result.getValue(fam2, qual3)));
+ assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2,
+ Bytes.toLong(result.getValue(fam1, qual2)));
+ long fam1Increment = Bytes.toLong(result.getValue(fam1, qual1))*3;
+ long fam2Increment = Bytes.toLong(result.getValue(fam2, qual3));
+ assertEquals("fam1=" + fam1Increment + ", fam2=" + fam2Increment,
+ fam1Increment, fam2Increment);
}
} catch (IOException e) {
e.printStackTrace();
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index cef92a6..60bc155 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -5010,6 +5010,7 @@ public class TestHRegion {
put.addColumn(family, qf, null);
}
region.put(put);
+ LOG.info(put.toString());
}
}
@@ -6058,7 +6059,6 @@ public class TestHRegion {
final HTableDescriptor htd, final RegionServerServices rsServices) {
super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
}
- @Override
protected long getNextSequenceId(WAL wal) throws IOException {
return 42;
}
@@ -6606,11 +6606,13 @@ public class TestHRegion {
p.addColumn(fam1, qual1, qual2);
RowMutations rm = new RowMutations(row);
rm.add(p);
- region.checkAndRowMutate(row, fam1, qual1, CompareOp.EQUAL, new BinaryComparator(qual1),
- rm, false);
+ assertTrue(region.checkAndRowMutate(row, fam1, qual1, CompareOp.EQUAL,
+ new BinaryComparator(qual1), rm, false));
result = region.get(new Get(row));
c = result.getColumnLatestCell(fam1, qual1);
assertEquals(c.getTimestamp(), 10L);
+ LOG.info("c value " +
+ Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
qual2, 0, qual2.length));
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 85b2a9d..4d5d7d8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -18,7 +18,15 @@
package org.apache.hadoop.hbase.regionserver;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hbase.regionserver.TestHRegion.assertGet;
+import static org.apache.hadoop.hbase.regionserver.TestHRegion.putData;
+import static org.apache.hadoop.hbase.regionserver.TestHRegion.verifyData;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.mock;
@@ -26,7 +34,6 @@ import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
-import static org.apache.hadoop.hbase.regionserver.TestHRegion.*;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -40,7 +47,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
@@ -63,9 +69,9 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
@@ -108,7 +114,6 @@ public class TestHRegionReplayEvents {
public static Configuration CONF ;
private String dir;
- private static FileSystem FILESYSTEM;
private byte[][] families = new byte[][] {
Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")};
@@ -134,7 +139,6 @@ public class TestHRegionReplayEvents {
@Before
public void setup() throws IOException {
TEST_UTIL = HBaseTestingUtility.createLocalHTU();
- FILESYSTEM = TEST_UTIL.getTestFileSystem();
CONF = TEST_UTIL.getConfiguration();
dir = TEST_UTIL.getDataTestDir("TestHRegionReplayEvents").toString();
method = name.getMethodName();
@@ -268,7 +272,7 @@ public class TestHRegionReplayEvents {
if (flushDesc != null) {
if (flushDesc.getAction() == FlushAction.START_FLUSH) {
LOG.info("-- Replaying flush start in secondary");
- PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
+ secondaryRegion.replayWALFlushStartMarker(flushDesc);
} else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
LOG.info("-- NOT Replaying flush commit in secondary");
}
@@ -299,7 +303,7 @@ public class TestHRegionReplayEvents {
}
WAL.Reader createWALReaderForPrimary() throws FileNotFoundException, IOException {
- return wals.createReader(TEST_UTIL.getTestFileSystem(),
+ return WALFactory.createReader(TEST_UTIL.getTestFileSystem(),
DefaultWALProvider.getCurrentFileName(walPrimary),
TEST_UTIL.getConfiguration());
}
@@ -769,7 +773,7 @@ public class TestHRegionReplayEvents {
// ensure all files are visible in secondary
for (Store store : secondaryRegion.getStores()) {
- assertTrue(store.getMaxSequenceId() <= secondaryRegion.getSequenceId());
+ assertTrue(store.getMaxSequenceId() <= secondaryRegion.getReadPoint(null));
}
LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc);
@@ -1095,7 +1099,7 @@ public class TestHRegionReplayEvents {
assertGet(region, family, row);
// region seqId should have advanced at least to this seqId
- assertEquals(origSeqId, region.getSequenceId());
+ assertEquals(origSeqId, region.getReadPoint(null));
// replay an entry that is smaller than current read point
// caution: adding an entry below current read point might cause partial dirty reads. Normal
@@ -1115,7 +1119,6 @@ public class TestHRegionReplayEvents {
* events to its WAL.
* @throws IOException
*/
- @SuppressWarnings("unchecked")
@Test
public void testSecondaryRegionDoesNotWriteRegionEventsToWAL() throws IOException {
secondaryRegion.close();
@@ -1252,10 +1255,18 @@ public class TestHRegionReplayEvents {
// put some data in primary
putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families);
primaryRegion.flush(true);
+ // I seem to need to push more edits through so the WAL flushes on the local fs. This was not
+ // needed before HBASE-15028 and I am not sure what is up. If I pause the test here and use
+ // WALPrettyPrinter to look at the WAL, I can see that we have not flushed; doing the same
+ // check before HBASE-15028, all edits show as flushed to the WAL. Something is up but I can't
+ // figure it out, and this is the only test that seems to suffer this flush issue.
+ // St.Ack 20160201
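+ // (The WAL content can be dumped offline with the WALPrettyPrinter tool; roughly, and
+ // assuming the current tool name and location:
+ //   bin/hbase org.apache.hadoop.hbase.wal.WALPrettyPrinter <path-to-wal-file>.)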
+ putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families);
reader = createWALReaderForPrimary();
while (true) {
WAL.Entry entry = reader.next();
+ LOG.info(entry);
if (entry == null) {
break;
}
@@ -1660,4 +1671,4 @@ public class TestHRegionReplayEvents {
return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf,
isReadOnly, durability, wal, families);
}
-}
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
new file mode 100644
index 0000000..1583bf8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
@@ -0,0 +1,245 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TestIncrementsFromClientSide;
+import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+
+
+/**
+ * Runs increments with some concurrency against a region to ensure we get the right answer.
+ * The tests can exercise both the fast and the slow increment paths; the fast path is enabled
+ * when HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY is set true (see the configuration
+ * sketch after setUp below).
+ *
+ * <p>There is a similar test in TestAtomicOperation. It runs 100 threads doing increments
+ * across two column families, all on one row, with the increments linked so it can prove
+ * atomicity on the row.
+ */
+@Category(MediumTests.class)
+public class TestRegionIncrement {
+ private static final Log LOG = LogFactory.getLog(TestRegionIncrement.class);
+ @Rule public TestName name = new TestName();
+ @Rule public final TestRule timeout =
+ CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+ withLookingForStuckThread(true).build();
+ private static HBaseTestingUtility TEST_UTIL;
+ private final static byte [] INCREMENT_BYTES = Bytes.toBytes("increment");
+ private static final int THREAD_COUNT = 10;
+ private static final int INCREMENT_COUNT = 10000;
+
+ @Before
+ public void setUp() throws Exception {
+ TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+ }
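+
+ // A sketch only (it assumes HRegion exposes the INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY
+ // boolean named in the class comment): the fast increment path could be switched on for a run
+ // of these tests by setting the flag on the test configuration before the region is created:
+ //
+ //   TEST_UTIL.getConfiguration()
+ //       .setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, true);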
+
+ @After
+ public void tearDown() throws Exception {
+ TEST_UTIL.cleanupTestDir();
+ }
+
+ private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
+ WAL wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
+ TEST_UTIL.getDataTestDir().toString(), conf);
+ return (HRegion)TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
+ HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
+ false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
+ }
+
+ private void closeRegion(final HRegion region) throws IOException {
+ region.close();
+ region.getWAL().close();
+ }
+
+ @Test
+ public void testMVCCCausingMisRead() throws IOException {
+ final HRegion region = getRegion(TEST_UTIL.getConfiguration(), this.name.getMethodName());
+ try {
+ // ADD TEST HERE!!
+ } finally {
+ closeRegion(region);
+ }
+ }
+
+ /**
+ * Increments a single cell a bunch of times.
+ */
+ private static class SingleCellIncrementer extends Thread {
+ private final int count;
+ private final HRegion region;
+ private final Increment increment;
+
+ SingleCellIncrementer(final int i, final int count, final HRegion region,
+ final Increment increment) {
+ super("" + i);
+ setDaemon(true);
+ this.count = count;
+ this.region = region;
+ this.increment = increment;
+ }
+
+ @Override
+ public void run() {
+ for (int i = 0; i < this.count; i++) {
+ try {
+ this.region.increment(this.increment);
+ // LOG.info(getName() + " " + i);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Increments a random row's Cell <code>count</code> times.
+ */
+ private static class CrossRowCellIncrementer extends Thread {
+ private final int count;
+ private final HRegion region;
+ private final Increment [] increments;
+
+ CrossRowCellIncrementer(final int i, final int count, final HRegion region, final int range) {
+ super("" + i);
+ setDaemon(true);
+ this.count = count;
+ this.region = region;
+ this.increments = new Increment[range];
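+ // Build one Increment per row in [0, range); every thread builds the same row set, so the
+ // threads' updates contend across those rows.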
+ for (int ii = 0; ii < range; ii++) {
+ this.increments[ii] = new Increment(Bytes.toBytes(ii));
+ this.increments[ii].addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
+ }
+ }
+
+ @Override
+ public void run() {
+ for (int i = 0; i < this.count; i++) {
+ try {
+ int index = ThreadLocalRandom.current().nextInt(0, this.increments.length);
+ this.region.increment(this.increments[index]);
+ // LOG.info(getName() + " " + index);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Have each thread update its own Cell so there is no contention between threads.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testUnContendedSingleCellIncrement()
+ throws IOException, InterruptedException {
+ final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
+ TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
+ long startTime = System.currentTimeMillis();
+ try {
+ SingleCellIncrementer [] threads = new SingleCellIncrementer[THREAD_COUNT];
+ for (int i = 0; i < threads.length; i++) {
+ byte [] rowBytes = Bytes.toBytes(i);
+ Increment increment = new Increment(rowBytes);
+ increment.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
+ threads[i] = new SingleCellIncrementer(i, INCREMENT_COUNT, region, increment);
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ RegionScanner regionScanner = region.getScanner(new Scan());
+ List<Cell> cells = new ArrayList<Cell>(THREAD_COUNT);
+ while(regionScanner.next(cells)) continue;
+ assertEquals(THREAD_COUNT, cells.size());
+ long total = 0;
+ for (Cell cell: cells) total +=
+ Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+ assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
+ } finally {
+ closeRegion(region);
+ LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
+ }
+ }
+
+ /**
+ * Have each thread update a set of Cells shared with the other threads so that updates
+ * contend across rows. Contrast with testUnContendedSingleCellIncrement above.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testContendedAcrossCellsIncrement()
+ throws IOException, InterruptedException {
+ final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
+ TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
+ long startTime = System.currentTimeMillis();
+ try {
+ CrossRowCellIncrementer [] threads = new CrossRowCellIncrementer[THREAD_COUNT];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new CrossRowCellIncrementer(i, INCREMENT_COUNT, region, THREAD_COUNT);
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ RegionScanner regionScanner = region.getScanner(new Scan());
+ List<Cell> cells = new ArrayList<Cell>(100);
+ while(regionScanner.next(cells)) continue;
+ assertEquals(THREAD_COUNT, cells.size());
+ long total = 0;
+ for (Cell cell: cells) total +=
+ Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+ assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
+ } finally {
+ closeRegion(region);
+ LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
index 64cdff2..2329e9d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
@@ -1,4 +1,5 @@
/**
+
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
index 0f7f23a..d99643d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java
@@ -551,7 +551,7 @@ public class TestTags {
public static class TestCoprocessorForTags extends BaseRegionObserver {
- public static boolean checkTagPresence = false;
+ public static volatile boolean checkTagPresence = false;
public static List<Tag> tags = null;
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
index af47465..fd6d535 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
@@ -408,13 +408,13 @@ public class TestFSHLog {
}
region.flush(true);
// FlushResult.flushSequenceId is not visible here so go get the current sequence id.
- long currentSequenceId = region.getSequenceId();
+ long currentSequenceId = region.getReadPoint(null);
// Now release the appends
goslow.setValue(false);
synchronized (goslow) {
goslow.notifyAll();
}
- assertTrue(currentSequenceId >= region.getSequenceId());
+ assertTrue(currentSequenceId >= region.getReadPoint(null));
} finally {
region.close(true);
wal.close();
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index 011c75d..f004aeb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -894,7 +894,7 @@ public class TestWALReplay {
for (HColumnDescriptor hcd : htd.getFamilies()) {
addRegionEdits(rowName, hcd.getName(), 5, this.ee, region, "x");
}
- long lastestSeqNumber = region.getSequenceId();
+ long lastestSeqNumber = region.getReadPoint(null);
// get the current seq no
wal.doCompleteCacheFlush = true;
// allow complete cache flush with the previous seq number got after first
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 47d2880..727b9bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -58,14 +56,12 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.mapreduce.Job;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import com.google.protobuf.ByteString;
-import com.sun.tools.javac.code.Attribute.Array;
-
@Category({ReplicationTests.class, LargeTests.class})
public class TestReplicationSmallTests extends TestReplicationBase {
http://git-wip-us.apache.org/repos/asf/hbase/blob/ed46591f/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index a870ed8..84d3bfc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -318,5 +318,4 @@ public class TestRegionReplicaReplicationEndpointNoMaster {
closeRegion(HTU, rs0, hriSecondary);
connection.close();
}
-
}