Posted to commits@hbase.apache.org by sy...@apache.org on 2015/10/28 00:29:00 UTC
[01/17] hbase git commit: HBASE-14689 Addendum and unit test for HBASE-13471
Repository: hbase
Updated Branches:
refs/heads/hbase-12439 899857609 -> d5d81d675
HBASE-14689 Addendum and unit test for HBASE-13471
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c04e806
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c04e806
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c04e806
Branch: refs/heads/hbase-12439
Commit: 4c04e8065f205a89b84670a9865149237499dd86
Parents: 8998576
Author: Enis Soztutar <en...@apache.org>
Authored: Mon Oct 26 11:56:51 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Mon Oct 26 11:56:51 2015 -0700
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/HRegion.java | 14 ++---
.../hadoop/hbase/regionserver/TestHRegion.java | 56 +++++++++++++++++++-
2 files changed, 62 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/4c04e806/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 34738de..07924e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2961,11 +2961,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
} catch (IOException ioe) {
LOG.warn("Failed getting lock in batch put, row="
+ Bytes.toStringBinary(mutation.getRow()), ioe);
+ throw ioe;
}
if (rowLock == null) {
// We failed to grab another lock
- assert false: "Should never fail to get lock when blocking";
- break; // stop acquiring more rows for this batch
+ throw new IOException("Failed getting lock in batch put, row=" +
+ Bytes.toStringBinary(mutation.getRow()));
} else {
acquiredRowLocks.add(rowLock);
}
@@ -5055,6 +5056,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* @param readLock is the lock reader or writer. True indicates that a non-exclusive
* lock is requested
*/
+ @Override
public RowLock getRowLock(byte[] row, boolean readLock) throws IOException {
// Make sure the row is inside of this region before getting the lock for it.
checkRow(row, "row lock");
@@ -6947,7 +6949,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
*/
private static List<Tag> carryForwardTags(final Cell cell, final List<Tag> tags) {
if (cell.getTagsLength() <= 0) return tags;
- List<Tag> newTags = tags == null? new ArrayList<Tag>(): /*Append Tags*/tags;
+ List<Tag> newTags = tags == null? new ArrayList<Tag>(): /*Append Tags*/tags;
Iterator<Tag> i =
CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
while (i.hasNext()) newTags.add(i.next());
@@ -7228,7 +7230,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// They are subtly different in quite a few ways. This came out only
// after study. I am not sure that many of the differences are intentional.
- // TODO: St.Ack 20150907
+ // TODO: St.Ack 20150907
@Override
public Result increment(Increment mutation, long nonceGroup, long nonce)
@@ -7242,7 +7244,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
boolean writeToWAL = durability != Durability.SKIP_WAL;
WALEdit walEdits = null;
List<Cell> allKVs = new ArrayList<Cell>(mutation.size());
-
+
Map<Store, List<Cell>> tempMemstore = new HashMap<Store, List<Cell>>();
long size = 0;
long txid = 0;
@@ -8045,7 +8047,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
WALKey key = new HLogKey(getRegionInfo().getEncodedNameAsBytes(),
getRegionInfo().getTable(), WALKey.NO_SEQUENCE_ID, 0, null,
HConstants.NO_NONCE, HConstants.NO_NONCE, getMVCC());
-
+
// Call append but with an empty WALEdit. The returned sequence id will not be associated
// with any edit and we can be sure it went in after all outstanding appends.
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/4c04e806/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index ed45c2d..06517d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -56,7 +56,11 @@ import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.UUID;
+import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -99,6 +103,7 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
@@ -6166,7 +6171,7 @@ public class TestHRegion {
key.setWriteEntry(we);
return 1L;
}
-
+
});
return wal;
}
@@ -6189,7 +6194,7 @@ public class TestHRegion {
// capture append() calls
WAL wal = mockWAL();
when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
-
+
// open a region first so that it can be closed later
region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
@@ -6529,6 +6534,53 @@ public class TestHRegion {
qual2, 0, qual2.length));
}
+ @Test(timeout = 30000)
+ public void testBatchMutateWithWrongRegionException() throws IOException, InterruptedException {
+ final byte[] a = Bytes.toBytes("a");
+ final byte[] b = Bytes.toBytes("b");
+ final byte[] c = Bytes.toBytes("c"); // exclusive
+
+ int prevLockTimeout = CONF.getInt("hbase.rowlock.wait.duration", 30000);
+ CONF.setInt("hbase.rowlock.wait.duration", 3000);
+ final HRegion region = initHRegion(tableName, a, c, name.getMethodName(), CONF, false, fam1);
+
+ Mutation[] mutations = new Mutation[] {
+ new Put(a).addImmutable(fam1, null, null),
+ new Put(c).addImmutable(fam1, null, null), // this is outside the region boundary
+ new Put(b).addImmutable(fam1, null, null),
+ };
+
+ OperationStatus[] status = region.batchMutate(mutations);
+ assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS);
+ assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SANITY_CHECK_FAILURE);
+ assertEquals(status[2].getOperationStatusCode(), OperationStatusCode.SUCCESS);
+
+ // test with a leaked row lock
+ ExecutorService exec = Executors.newSingleThreadExecutor();
+ exec.submit(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ region.getRowLock(b);
+ return null;
+ }
+ });
+ exec.shutdown();
+ exec.awaitTermination(30, TimeUnit.SECONDS);
+
+ mutations = new Mutation[] {
+ new Put(a).addImmutable(fam1, null, null),
+ new Put(b).addImmutable(fam1, null, null),
+ };
+
+ try {
+ status = region.batchMutate(mutations);
+ fail("Failed to throw exception");
+ } catch (IOException expected) {
+ }
+
+ CONF.setInt("hbase.rowlock.wait.duration", prevLockTimeout);
+ }
+
static HRegion initHRegion(TableName tableName, String callingMethod,
byte[]... families) throws IOException {
return initHRegion(tableName, callingMethod, HBaseConfiguration.create(),
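For illustration, a minimal sketch (hypothetical caller code, not part of the patch) of the contract this addendum establishes: a row lock that cannot be acquired within hbase.rowlock.wait.duration now surfaces from batchMutate() as an IOException instead of tripping an assert and silently truncating the batch.

    // 'region' and 'mutations' as in the test above; LOG is assumed.
    try {
      OperationStatus[] status = region.batchMutate(mutations);
    } catch (IOException e) {
      // Row lock acquisition failed or timed out; the batch did not
      // complete and the caller must handle or retry the whole operation.
      LOG.warn("batchMutate failed to acquire a row lock", e);
    }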
[17/17] hbase git commit: HBASE-14674 Rpc handler / task monitoring seems to be broken after 0.98 (Heng Chen)
Posted by sy...@apache.org.
HBASE-14674 Rpc handler / task monitoring seems to be broken after 0.98 (Heng Chen)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5d81d67
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5d81d67
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5d81d67
Branch: refs/heads/hbase-12439
Commit: d5d81d675ace2d87c4ac19562b6b0a29da3d8902
Parents: dfa0528
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Oct 27 14:24:21 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Oct 27 14:59:29 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/ipc/CallRunner.java | 19 ++++---------------
.../hadoop/hbase/ipc/FifoRpcScheduler.java | 2 +-
.../org/apache/hadoop/hbase/ipc/RpcExecutor.java | 3 +++
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 16 +++++++++++++++-
.../apache/hadoop/hbase/ipc/TestCallRunner.java | 2 ++
.../hadoop/hbase/ipc/TestSimpleRpcScheduler.java | 4 ++++
6 files changed, 29 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d81d67/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index ede4b4e..5b52521 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ipc.RpcServer.Call;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
@@ -59,20 +58,22 @@ public class CallRunner {
this.rpcServer = rpcServer;
// Add size of the call to queue size.
this.rpcServer.addCallSize(call.getSize());
- this.status = getStatus();
}
public Call getCall() {
return call;
}
+ public void setStatus(MonitoredRPCHandler status) {
+ this.status = status;
+ }
+
/**
* Cleanup after ourselves... let go of references.
*/
private void cleanup() {
this.call = null;
this.rpcServer = null;
- this.status = null;
}
public void run() {
@@ -160,16 +161,4 @@ public class CallRunner {
cleanup();
}
}
-
- MonitoredRPCHandler getStatus() {
- // It is ugly the way we park status up in RpcServer. Let it be for now. TODO.
- MonitoredRPCHandler status = RpcServer.MONITORED_RPC.get();
- if (status != null) {
- return status;
- }
- status = TaskMonitor.get().createRPCStatus(Thread.currentThread().getName());
- status.pause("Waiting for a call");
- RpcServer.MONITORED_RPC.set(status);
- return status;
- }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d81d67/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
index 8140c1c..621a8ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ -24,7 +24,6 @@ import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hbase.ipc.CallRunner;
/**
* A very simple {@code RpcScheduler} that serves incoming requests in order.
@@ -70,6 +69,7 @@ public class FifoRpcScheduler extends RpcScheduler {
executor.submit(new Runnable() {
@Override
public void run() {
+ task.setStatus(RpcServer.getStatus());
task.run();
}
});
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d81d67/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
index 709429d..27750a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
@@ -124,7 +125,9 @@ public abstract class RpcExecutor {
try {
while (running) {
try {
+ MonitoredRPCHandler status = RpcServer.getStatus();
CallRunner task = myQueue.take();
+ task.setStatus(status);
try {
activeHandlerCount.incrementAndGet();
task.run();
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d81d67/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index c20e972..6cbce95 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
import org.apache.hadoop.hbase.io.BoundedByteBufferPool;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta;
@@ -2314,7 +2315,8 @@ public class RpcServer implements RpcServerInterface {
* @param user client user
* @param connection incoming connection
* @param addr InetAddress of incoming connection
- * @throws org.apache.hadoop.security.authorize.AuthorizationException when the client isn't authorized to talk the protocol
+ * @throws org.apache.hadoop.security.authorize.AuthorizationException
+ * when the client isn't authorized to talk the protocol
*/
public void authorize(UserGroupInformation user, ConnectionHeader connection, InetAddress addr)
throws AuthorizationException {
@@ -2498,6 +2500,18 @@ public class RpcServer implements RpcServerInterface {
return bsasi == null? null: bsasi.getBlockingService();
}
+ static MonitoredRPCHandler getStatus() {
+ // It is ugly the way we park status up in RpcServer. Let it be for now. TODO.
+ MonitoredRPCHandler status = RpcServer.MONITORED_RPC.get();
+ if (status != null) {
+ return status;
+ }
+ status = TaskMonitor.get().createRPCStatus(Thread.currentThread().getName());
+ status.pause("Waiting for a call");
+ RpcServer.MONITORED_RPC.set(status);
+ return status;
+ }
+
/** Returns the remote side ip address when invoked inside an RPC
* Returns null in case of an error.
* @return InetAddress
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d81d67/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java
index ec06ff3..47c15ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.ipc;
+import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl;
import org.apache.hadoop.hbase.testclassification.RPCTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
@@ -35,6 +36,7 @@ public class TestCallRunner {
RpcServer.Call mockCall = Mockito.mock(RpcServer.Call.class);
mockCall.connection = Mockito.mock(RpcServer.Connection.class);
CallRunner cr = new CallRunner(mockRpcServer, mockCall);
+ cr.setStatus(new MonitoredRPCHandlerImpl());
cr.run();
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d81d67/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 2b7ffb2..087429a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.RPCTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -87,6 +88,7 @@ public class TestSimpleRpcScheduler {
scheduler.init(CONTEXT);
scheduler.start();
CallRunner task = createMockTask();
+ task.setStatus(new MonitoredRPCHandlerImpl());
scheduler.dispatch(task);
verify(task, timeout(1000)).run();
scheduler.stop();
@@ -121,6 +123,7 @@ public class TestSimpleRpcScheduler {
}
};
for (CallRunner task : tasks) {
+ task.setStatus(new MonitoredRPCHandlerImpl());
doAnswer(answerToRun).when(task).run();
}
@@ -303,6 +306,7 @@ public class TestSimpleRpcScheduler {
private void doAnswerTaskExecution(final CallRunner callTask,
final ArrayList<Integer> results, final int value, final int sleepInterval) {
+ callTask.setStatus(new MonitoredRPCHandlerImpl());
doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) {
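Condensed, the pattern this patch establishes (names taken from the RpcExecutor hunk above; a sketch, not the full handler loop): the MonitoredRPCHandler lives in a thread-local on the handler thread, so it is now fetched there via RpcServer.getStatus() and attached to each CallRunner before run(), rather than being created in the CallRunner constructor on the reader thread.

    while (running) {
      MonitoredRPCHandler status = RpcServer.getStatus(); // thread-local, created lazily
      CallRunner task = myQueue.take();
      task.setStatus(status); // previously done in the CallRunner constructor
      task.run();
    }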
[10/17] hbase git commit: HBASE-14257 Periodic flusher only handles hbase:meta, not other system tables. (Abhishek)
Posted by sy...@apache.org.
HBASE-14257 Periodic flusher only handles hbase:meta, not other system tables. (Abhishek)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4269c7fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4269c7fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4269c7fe
Branch: refs/heads/hbase-12439
Commit: 4269c7fe69baabbd813dcdf6ed0b3a57ea4b41de
Parents: ac746e0
Author: anoopsjohn <an...@gmail.com>
Authored: Tue Oct 27 11:52:03 2015 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Tue Oct 27 11:52:03 2015 +0530
----------------------------------------------------------------------
.../java/org/apache/hadoop/hbase/regionserver/HRegion.java | 7 ++++---
.../apache/hadoop/hbase/regionserver/TestDefaultMemStore.java | 6 +++---
2 files changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/4269c7fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 07924e6..0fcdaab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1307,7 +1307,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
"hbase.regionserver.optionalcacheflushinterval";
/** Default interval for the memstore flush */
public static final int DEFAULT_CACHE_FLUSH_INTERVAL = 3600000;
- public static final int META_CACHE_FLUSH_INTERVAL = 300000; // 5 minutes
+ /** Default interval for System tables memstore flush */
+ public static final int SYSTEM_CACHE_FLUSH_INTERVAL = 300000; // 5 minutes
/** Conf key to force a flush if there are already enough changes for one region in memstore */
public static final String MEMSTORE_FLUSH_PER_CHANGES =
@@ -1995,9 +1996,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return true;
}
long modifiedFlushCheckInterval = flushCheckInterval;
- if (getRegionInfo().isMetaRegion() &&
+ if (getRegionInfo().isSystemTable() &&
getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
- modifiedFlushCheckInterval = META_CACHE_FLUSH_INTERVAL;
+ modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL;
}
if (modifiedFlushCheckInterval <= 0) { //disabled
return false;
http://git-wip-us.apache.org/repos/asf/hbase/blob/4269c7fe/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 882fd53..f1e20d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -944,10 +944,10 @@ public class TestDefaultMemStore extends TestCase {
public void testShouldFlushMeta() throws Exception {
// write an edit in the META and ensure the shouldFlush (that the periodic memstore
- // flusher invokes) returns true after META_CACHE_FLUSH_INTERVAL (even though
+ // flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL (even though
// the MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value)
Configuration conf = new Configuration();
- conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.META_CACHE_FLUSH_INTERVAL * 10);
+ conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
Path testDir = hbaseUtility.getDataTestDir();
EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
@@ -969,7 +969,7 @@ public class TestDefaultMemStore extends TestCase {
edge.setCurrentTimeMillis(1234 + 100);
StringBuffer sb = new StringBuffer();
assertTrue(meta.shouldFlush(sb) == false);
- edge.setCurrentTimeMillis(edge.currentTime() + HRegion.META_CACHE_FLUSH_INTERVAL + 1);
+ edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
assertTrue(meta.shouldFlush(sb) == true);
}
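For context, the periodic flush interval for ordinary user tables stays configurable, while system tables now always use the fixed SYSTEM_CACHE_FLUSH_INTERVAL of five minutes. A hedged tuning sketch, with the key taken from the hbase.regionserver.optionalcacheflushinterval constant in the hunk above:

    // Hypothetical operator tuning; system tables ignore this key.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.optionalcacheflushinterval", 3600000); // 1 hour default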
[13/17] hbase git commit: HBASE-14655 Addendum passes User to store#compact()
Posted by sy...@apache.org.
HBASE-14655 Addendum passes User to store#compact()
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e04e7402
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e04e7402
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e04e7402
Branch: refs/heads/hbase-12439
Commit: e04e7402cd5df5fc7001101ad50fc2dbf8de5c1e
Parents: f91546f
Author: tedyu <yu...@gmail.com>
Authored: Tue Oct 27 11:17:30 2015 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue Oct 27 11:17:30 2015 -0700
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e04e7402/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0fcdaab..19bcd33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1823,7 +1823,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// We no longer need to cancel the request on the way out of this
// method because Store#compact will clean up unconditionally
requestNeedsCancellation = false;
- store.compact(compaction, throughputController);
+ store.compact(compaction, throughputController, user);
} catch (InterruptedIOException iioe) {
String msg = "compaction interrupted";
LOG.info(msg, iioe);
[16/17] hbase git commit: HBASE-14705 Javadoc for KeyValue constructor is not correct (Jean-Marc Spaggiari)
Posted by sy...@apache.org.
HBASE-14705 Javadoc for KeyValue constructor is not correct (Jean-Marc Spaggiari)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfa05284
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfa05284
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfa05284
Branch: refs/heads/hbase-12439
Commit: dfa05284cfef985e806660de0a1415f0fa7c2211
Parents: 16ff57b
Author: Andrew Purtell <ap...@apache.org>
Authored: Tue Oct 27 14:39:03 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Tue Oct 27 14:40:28 2015 -0700
----------------------------------------------------------------------
hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/dfa05284/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 7534e9d..933dd1d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -396,7 +396,8 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
}
/**
- * Constructs KeyValue structure filled with null value.
+ * Constructs KeyValue structure as a put filled with specified values and
+ * LATEST_TIMESTAMP.
* @param row - row key (arbitrary byte array)
* @param family family name
* @param qualifier column qualifier
[06/17] hbase git commit: HBASE-14283 Reverse scan doesn’t work with HFile inline index/bloom blocks
Posted by sy...@apache.org.
HBASE-14283 Reverse scan doesn’t work with HFile inline index/bloom blocks
Signed-off-by: Andrew Purtell <ap...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c56e239
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c56e239
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c56e239
Branch: refs/heads/hbase-12439
Commit: 5c56e239c3af22e1232681cceaed7bd96480ed92
Parents: efb8295
Author: Ben Lau <be...@yahoo-inc.com>
Authored: Wed Oct 7 16:46:16 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon Oct 26 15:12:21 2015 -0700
----------------------------------------------------------------------
.../hadoop/hbase/io/hfile/HFileReaderImpl.java | 7 +-
.../hfile/TestSeekBeforeWithInlineBlocks.java | 189 +++++++++++++++++++
2 files changed, 194 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/5c56e239/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 5af72b6..49b6f5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -859,9 +859,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
reader.returnBlock(seekToBlock);
// It is important that we compute and pass onDiskSize to the block
// reader so that it does not have to read the header separately to
- // figure out the size.
+ // figure out the size. Currently, we do not have a way to do this
+ // correctly in the general case however.
+ // TODO: See https://issues.apache.org/jira/browse/HBASE-14576
+ int prevBlockSize = -1;
seekToBlock = reader.readBlock(previousBlockOffset,
- seekToBlock.getOffset() - previousBlockOffset, cacheBlocks,
+ prevBlockSize, cacheBlocks,
pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding());
// TODO shortcut: seek forward in this block to the last key of the
// block.
http://git-wip-us.apache.org/repos/asf/hbase/blob/5c56e239/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
new file mode 100644
index 0000000..c9af3d3
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
@@ -0,0 +1,189 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({IOTests.class, MediumTests.class})
+public class TestSeekBeforeWithInlineBlocks {
+
+ private static final Log LOG = LogFactory.getLog(TestSeekBeforeWithInlineBlocks.class);
+
+ private static final HBaseTestingUtility TEST_UTIL =
+ new HBaseTestingUtility();
+
+ private static final int NUM_KV = 10000;
+
+ private static final int DATA_BLOCK_SIZE = 4096;
+ private static final int BLOOM_BLOCK_SIZE = 1024;
+ private static final int[] INDEX_CHUNK_SIZES = { 65536, 4096, 1024 };
+ private static final int[] EXPECTED_NUM_LEVELS = { 1, 2, 3 };
+
+ private static final Random RAND = new Random(192537);
+ private static final byte[] FAM = Bytes.toBytes("family");
+
+ private FileSystem fs;
+ private Configuration conf;
+
+ /**
+ * Scanner.seekBefore() could fail because when seeking to a previous HFile data block, it needs
+ * to know the size of that data block, which it calculates using current data block offset and
+ * the previous data block offset. This fails to work when there are leaf-level index blocks in
+ * the scannable section of the HFile, i.e. starting in HFileV2. This test will try seekBefore()
+ * on a flat (single-level) and multi-level (2,3) HFile and confirm this bug is now fixed. This
+ * bug also happens for inline Bloom blocks for the same reasons.
+ */
+ @Test
+ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException {
+ conf = TEST_UTIL.getConfiguration();
+
+ // Try out different HFile versions to ensure reverse scan works on each version
+ for (int hfileVersion = HFile.MIN_FORMAT_VERSION_WITH_TAGS;
+ hfileVersion <= HFile.MAX_FORMAT_VERSION; hfileVersion++) {
+
+ conf.setInt(HFile.FORMAT_VERSION_KEY, hfileVersion);
+ fs = HFileSystem.get(conf);
+
+ // Try out different bloom types because inline Bloom blocks break seekBefore()
+ for (BloomType bloomType : BloomType.values()) {
+
+ // Test out HFile block indices of various sizes/levels
+ for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; testI++) {
+ int indexBlockSize = INDEX_CHUNK_SIZES[testI];
+ int expectedNumLevels = EXPECTED_NUM_LEVELS[testI];
+
+ LOG.info(String.format("Testing HFileVersion: %s, BloomType: %s, Index Levels: %s",
+ hfileVersion, bloomType, expectedNumLevels));
+
+ conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize);
+ conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
+
+ Cell[] cells = new Cell[NUM_KV];
+
+ Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
+ String.format("testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s",
+ hfileVersion, bloomType, testI));
+
+ // Disable caching to prevent it from hiding any bugs in block seeks/reads
+ conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
+ CacheConfig cacheConf = new CacheConfig(conf);
+
+ // Write the HFile
+ {
+ HFileContext meta = new HFileContextBuilder()
+ .withBlockSize(DATA_BLOCK_SIZE)
+ .build();
+
+ StoreFile.Writer storeFileWriter =
+ new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withFilePath(hfilePath)
+ .withFileContext(meta)
+ .withBloomType(bloomType)
+ .build();
+
+ for (int i = 0; i < NUM_KV; i++) {
+ byte[] row = TestHFileWriterV2.randomOrderedKey(RAND, i);
+ byte[] qual = TestHFileWriterV2.randomRowOrQualifier(RAND);
+ byte[] value = TestHFileWriterV2.randomValue(RAND);
+ KeyValue kv = new KeyValue(row, FAM, qual, value);
+
+ storeFileWriter.append(kv);
+ cells[i] = kv;
+ }
+
+ storeFileWriter.close();
+ }
+
+ // Read the HFile
+ HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, conf);
+
+ // Sanity check the HFile index level
+ assertEquals(expectedNumLevels, reader.getTrailer().getNumDataIndexLevels());
+
+ // Check that we can seekBefore in either direction and with both pread
+ // enabled and disabled
+ for (boolean pread : new boolean[] { false, true }) {
+ HFileScanner scanner = reader.getScanner(true, pread);
+ checkNoSeekBefore(cells, scanner, 0);
+ for (int i = 1; i < NUM_KV; i++) {
+ checkSeekBefore(cells, scanner, i);
+ checkCell(cells[i-1], scanner.getCell());
+ }
+ assertTrue(scanner.seekTo());
+ for (int i = NUM_KV - 1; i >= 1; i--) {
+ checkSeekBefore(cells, scanner, i);
+ checkCell(cells[i-1], scanner.getCell());
+ }
+ checkNoSeekBefore(cells, scanner, 0);
+ scanner.close();
+ }
+
+ reader.close();
+ }
+ }
+ }
+ }
+
+ private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i)
+ throws IOException {
+ assertEquals("Failed to seek to the key before #" + i + " ("
+ + CellUtil.getCellKeyAsString(cells[i]) + ")", true,
+ scanner.seekBefore(cells[i]));
+ }
+
+ private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i)
+ throws IOException {
+ assertEquals("Incorrectly succeeded in seeking to before first key ("
+ + CellUtil.getCellKeyAsString(cells[i]) + ")", false,
+ scanner.seekBefore(cells[i]));
+ }
+
+ /** Check a key/value pair after it was read by the reader */
+ private void checkCell(Cell expected, Cell actual) {
+ assertTrue(String.format("Expected key %s, but was %s",
+ CellUtil.getCellKeyAsString(expected), CellUtil.getCellKeyAsString(actual)),
+ CellUtil.equals(expected, actual));
+ }
+}
+
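A minimal usage sketch of the API the test exercises (names from the test above, not patch code): seekBefore() positions the scanner at the last cell strictly before the given key and returns false when asked to seek before the first cell.

    HFileScanner scanner = reader.getScanner(true /*cacheBlocks*/, false /*pread*/);
    if (scanner.seekBefore(cell)) {
      Cell previous = scanner.getCell(); // the cell immediately preceding 'cell'
    }
    scanner.close();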
[15/17] hbase git commit: HBASE-14680 Two configs for snapshot timeout and better defaults (Heng Chen)
Posted by sy...@apache.org.
HBASE-14680 Two configs for snapshot timeout and better defaults (Heng Chen)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16ff57be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16ff57be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16ff57be
Branch: refs/heads/hbase-12439
Commit: 16ff57bea94645aae30ba9b6bf4375b2eec202f1
Parents: c91bfff
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Oct 27 11:35:46 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Oct 27 14:01:01 2015 -0700
----------------------------------------------------------------------
.../src/main/resources/hbase-default.xml | 14 ++++++++++
.../snapshot/DisabledTableSnapshotHandler.java | 29 --------------------
.../hbase/master/snapshot/SnapshotManager.java | 14 +++-------
.../snapshot/RegionServerSnapshotManager.java | 4 +--
.../snapshot/SnapshotDescriptionUtils.java | 24 ++++++++++++++--
5 files changed, 41 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/16ff57be/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 1654391..c64873f 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1686,4 +1686,18 @@ possible configurations would overwhelm and obscure the important.
The max number of threads used in MobCompactor.
</description>
</property>
+ <property>
+ <name>hbase.snapshot.master.timeout.millis</name>
+ <value>300000</value>
+ <description>
+ Timeout for master for the snapshot procedure execution
+ </description>
+ </property>
+ <property>
+ <name>hbase.snapshot.region.timeout</name>
+ <value>300000</value>
+ <description>
+ Timeout for regionservers to keep threads in snapshot request pool waiting
+ </description>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/hbase/blob/16ff57be/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
index 1f2ed0a..5d59229 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
@@ -26,20 +26,16 @@ import java.util.concurrent.ThreadPoolExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
-import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -56,7 +52,6 @@ import org.apache.zookeeper.KeeperException;
@InterfaceStability.Evolving
public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
private static final Log LOG = LogFactory.getLog(DisabledTableSnapshotHandler.class);
- private final TimeoutExceptionInjector timeoutInjector;
/**
* @param snapshot descriptor of the snapshot to take
@@ -65,9 +60,6 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
final MasterServices masterServices) {
super(snapshot, masterServices);
-
- // setup the timer
- timeoutInjector = getMasterTimerAndBindToMonitor(snapshot, conf, monitor);
}
@Override
@@ -81,8 +73,6 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
public void snapshotRegions(List<Pair<HRegionInfo, ServerName>> regionsAndLocations)
throws IOException, KeeperException {
try {
- timeoutInjector.start();
-
// 1. get all the regions hosting this table.
// extract each pair to separate lists
@@ -127,25 +117,6 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
} finally {
LOG.debug("Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot)
+ " as finished.");
-
- // 3. mark the timer as finished - even if we got an exception, we don't need to time the
- // operation any further
- timeoutInjector.complete();
}
}
-
-
- /**
- * Create a snapshot timer for the master which notifies the monitor when an error occurs
- * @param snapshot snapshot to monitor
- * @param conf configuration to use when getting the max snapshot life
- * @param monitor monitor to notify when the snapshot life expires
- * @return the timer to use update to signal the start and end of the snapshot
- */
- private TimeoutExceptionInjector getMasterTimerAndBindToMonitor(SnapshotDescription snapshot,
- Configuration conf, ForeignExceptionListener monitor) {
- long maxTime = SnapshotDescriptionUtils.getMaxMasterTimeout(conf, snapshot.getType(),
- SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
- return new TimeoutExceptionInjector(monitor, maxTime);
- }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/16ff57be/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 38aa2c0..d367d6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -122,15 +122,6 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
*/
private static final String SNAPSHOT_WAKE_MILLIS_KEY = "hbase.snapshot.master.wakeMillis";
- /** By default, check to see if the snapshot is complete (ms) */
- private static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 60000;
-
- /**
- * Conf key for # of ms elapsed before injecting a snapshot timeout error when waiting for
- * completion.
- */
- private static final String SNAPSHOT_TIMEOUT_MILLIS_KEY = "hbase.snapshot.master.timeoutMillis";
-
/** Name of the operation to use in the controller */
public static final String ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION = "online-snapshot";
@@ -1078,7 +1069,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
// get the configuration for the coordinator
Configuration conf = master.getConfiguration();
long wakeFrequency = conf.getInt(SNAPSHOT_WAKE_MILLIS_KEY, SNAPSHOT_WAKE_MILLIS_DEFAULT);
- long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
+ long timeoutMillis = Math.max(conf.getLong(SnapshotDescriptionUtils.SNAPSHOT_TIMEOUT_MILLIS_KEY,
+ SnapshotDescriptionUtils.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT),
+ conf.getLong(SnapshotDescriptionUtils.MASTER_SNAPSHOT_TIMEOUT_MILLIS,
+ SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME));
int opThreads = conf.getInt(SNAPSHOT_POOL_THREADS_KEY, SNAPSHOT_POOL_THREADS_DEFAULT);
// setup the default procedure coordinator
http://git-wip-us.apache.org/repos/asf/hbase/blob/16ff57be/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index e08cf0e..537329a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -88,8 +88,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
/** Conf key for max time to keep threads in snapshot request pool waiting */
public static final String SNAPSHOT_TIMEOUT_MILLIS_KEY = "hbase.snapshot.region.timeout";
- /** Keep threads alive in request pool for max of 60 seconds */
- public static final long SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 60000;
+ /** Keep threads alive in request pool for max of 300 seconds */
+ public static final long SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 5 * 60000;
/** Conf key for millis between checks to see if snapshot completed or if there are errors*/
public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = "hbase.snapshot.region.wakefrequency";
http://git-wip-us.apache.org/repos/asf/hbase/blob/16ff57be/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 2fc5d83..79e7312 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -105,10 +105,27 @@ public class SnapshotDescriptionUtils {
/** Default value if no start time is specified */
public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0;
+
public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis";
- /** By default, wait 60 seconds for a snapshot to complete */
- public static final long DEFAULT_MAX_WAIT_TIME = 60000;
+ /** By default, wait 300 seconds for a snapshot to complete */
+ public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5 ;
+
+
+ /**
+ * By default, check to see if the snapshot is complete (ms)
+ * @deprecated Use {@link #DEFAULT_MAX_WAIT_TIME} instead.
+ * */
+ @Deprecated
+ public static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 60000 * 5;
+
+ /**
+ * Conf key for # of ms elapsed before injecting a snapshot timeout error when waiting for
+ * completion.
+ * @deprecated Use {@link #MASTER_SNAPSHOT_TIMEOUT_MILLIS} instead.
+ */
+ @Deprecated
+ public static final String SNAPSHOT_TIMEOUT_MILLIS_KEY = "hbase.snapshot.master.timeoutMillis";
private SnapshotDescriptionUtils() {
// private constructor for utility class
@@ -128,7 +145,8 @@ public class SnapshotDescriptionUtils {
default:
confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS;
}
- return conf.getLong(confKey, defaultMaxWaitTime);
+ return Math.max(conf.getLong(confKey, defaultMaxWaitTime),
+ conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, defaultMaxWaitTime));
}
/**
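The net effect of the two hunks above, condensed into a sketch (not patch code): the effective master-side timeout is the larger of the new key and the deprecated one, so configurations that still set the old key keep working.

    long timeoutMillis = Math.max(
        conf.getLong("hbase.snapshot.master.timeout.millis", 5 * 60000L),
        conf.getLong("hbase.snapshot.master.timeoutMillis", 5 * 60000L));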
[07/17] hbase git commit: HBASE-14682 CM restore functionality for regionservers is broken
Posted by sy...@apache.org.
HBASE-14682 CM restore functionality for regionservers is broken
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b860025
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b860025
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b860025
Branch: refs/heads/hbase-12439
Commit: 2b860025216898bb34a8e77799dd20a9a638e9c5
Parents: 5c56e23
Author: Enis Soztutar <en...@apache.org>
Authored: Mon Oct 26 16:47:16 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Mon Oct 26 16:47:16 2015 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hbase/DistributedHBaseCluster.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/2b860025/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 07ca5ec..62f5c66 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -431,8 +431,8 @@ public class DistributedHBaseCluster extends HBaseCluster {
protected boolean restoreRegionServers(ClusterStatus initial, ClusterStatus current) {
Set<ServerName> toStart = new TreeSet<ServerName>(new ServerNameIgnoreStartCodeComparator());
Set<ServerName> toKill = new TreeSet<ServerName>(new ServerNameIgnoreStartCodeComparator());
- toStart.addAll(initial.getBackupMasters());
- toKill.addAll(current.getBackupMasters());
+ toStart.addAll(initial.getServers());
+ toKill.addAll(current.getServers());
for (ServerName server : current.getServers()) {
toStart.remove(server);
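Condensed, the corrected bookkeeping (a sketch using names from the hunk above): the restore path now diffs the region server sets of the initial and current cluster status rather than the backup master sets.

    Set<ServerName> toStart = new TreeSet<ServerName>(new ServerNameIgnoreStartCodeComparator());
    Set<ServerName> toKill = new TreeSet<ServerName>(new ServerNameIgnoreStartCodeComparator());
    toStart.addAll(initial.getServers()); // was getBackupMasters(), the bug
    toKill.addAll(current.getServers());  // was getBackupMasters()
    for (ServerName server : current.getServers()) {
      toStart.remove(server); // already running, nothing to start
    }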
[05/17] hbase git commit: HBASE-13318 RpcServer.getListenerAddress should handle when the accept channel is closed
Posted by sy...@apache.org.
HBASE-13318 RpcServer.getListenerAddress should handle when the accept channel is closed
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efb82957
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efb82957
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efb82957
Branch: refs/heads/hbase-12439
Commit: efb82957da09ab06f5c887b3d62ad055bbba089f
Parents: 928dade
Author: Andrew Purtell <ap...@apache.org>
Authored: Mon Oct 26 14:24:42 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon Oct 26 15:11:12 2015 -0700
----------------------------------------------------------------------
.../hbase/ipc/IntegrationTestRpcClient.java | 22 ++++++++++++++++----
.../org/apache/hadoop/hbase/ipc/CallRunner.java | 11 ++++++----
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 17 ++++++++++-----
.../hbase/regionserver/RSRpcServices.java | 6 +++++-
.../hadoop/hbase/ipc/AbstractTestIPC.java | 22 +++++++++++++++-----
.../apache/hadoop/hbase/ipc/TestAsyncIPC.java | 9 ++++++++
.../apache/hadoop/hbase/ipc/TestDelayedRpc.java | 21 +++++++++++++------
.../org/apache/hadoop/hbase/ipc/TestIPC.java | 5 ++++-
.../hadoop/hbase/ipc/TestProtoBufRpc.java | 6 +++++-
.../hbase/ipc/TestRpcHandlerException.java | 7 +++++--
.../TestRSKilledWhenInitializing.java | 8 +++++--
.../hadoop/hbase/security/TestSecureRPC.java | 9 +++++---
.../security/token/TestTokenAuthentication.java | 6 +++++-
13 files changed, 114 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
index 09de871..c28f3e6 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
@@ -168,9 +168,13 @@ public class IntegrationTestRpcClient {
TestRpcServer rpcServer = new TestRpcServer(conf);
rpcServer.start();
- rpcServers.put(rpcServer.getListenerAddress(), rpcServer);
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
+ rpcServers.put(address, rpcServer);
serverList.add(rpcServer);
- LOG.info("Started server: " + rpcServer.getListenerAddress());
+ LOG.info("Started server: " + address);
return rpcServer;
} finally {
lock.writeLock().unlock();
@@ -187,7 +191,13 @@ public class IntegrationTestRpcClient {
int size = rpcServers.size();
int rand = random.nextInt(size);
rpcServer = serverList.remove(rand);
- rpcServers.remove(rpcServer.getListenerAddress());
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ // Throw exception here. We can't remove this instance from the server map because
+ // we no longer have access to its map key
+ throw new IOException("Listener channel is closed");
+ }
+ rpcServers.remove(address);
if (rpcServer != null) {
stopServer(rpcServer);
@@ -305,8 +315,12 @@ public class IntegrationTestRpcClient {
TestRpcServer server = cluster.getRandomServer();
try {
User user = User.getCurrent();
+ InetSocketAddress address = server.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
ret = (EchoResponseProto)
- rpcClient.callBlockingMethod(md, null, param, ret, user, server.getListenerAddress());
+ rpcClient.callBlockingMethod(md, null, param, ret, user, address);
} catch (Exception e) {
LOG.warn(e);
continue; // expected in case connection is closing or closed
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index 38b7c91..ede4b4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -16,6 +16,7 @@ package org.apache.hadoop.hbase.ipc;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+import java.net.InetSocketAddress;
import java.nio.channels.ClosedChannelException;
import org.apache.commons.logging.Log;
@@ -96,8 +97,9 @@ public class CallRunner {
TraceScope traceScope = null;
try {
if (!this.rpcServer.isStarted()) {
- throw new ServerNotRunningYetException("Server " + rpcServer.getListenerAddress()
- + " is not running yet");
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ throw new ServerNotRunningYetException("Server " +
+ (address != null ? address : "(channel closed)") + " is not running yet");
}
if (call.tinfo != null) {
traceScope = Trace.startSpan(call.toTraceString(), call.tinfo);
@@ -143,9 +145,10 @@ public class CallRunner {
throw e;
}
} catch (ClosedChannelException cce) {
+ InetSocketAddress address = rpcServer.getListenerAddress();
RpcServer.LOG.warn(Thread.currentThread().getName() + ": caught a ClosedChannelException, " +
- "this means that the server " + rpcServer.getListenerAddress() + " was processing a " +
- "request but the client went away. The error message was: " +
+ "this means that the server " + (address != null ? address : "(channel closed)") +
+ " was processing a request but the client went away. The error message was: " +
cce.getMessage());
} catch (Exception e) {
RpcServer.LOG.warn(Thread.currentThread().getName()
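
On logging paths such as CallRunner the patch substitutes a placeholder rather than
throwing, since failing while reporting an error would mask the original problem.
A sketch of that formatting choice (helper name illustrative):

  // Render a possibly-null listener address for log and exception messages.
  static String describeAddress(java.net.InetSocketAddress address) {
    return address != null ? address.toString() : "(channel closed)";
  }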
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 8c08635..c20e972 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1813,8 +1813,9 @@ public class RpcServer implements RpcServerInterface {
responder, totalRequestSize, null, null);
ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
+ InetSocketAddress address = getListenerAddress();
setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
- "Call queue is full on " + getListenerAddress() +
+ "Call queue is full on " + (address != null ? address : "(channel closed)") +
", is hbase.ipc.server.max.callqueue.size too small?");
responder.doRespond(callTooBig);
return;
@@ -1842,8 +1843,9 @@ public class RpcServer implements RpcServerInterface {
buf, offset, buf.length);
}
} catch (Throwable t) {
- String msg = getListenerAddress() + " is unable to read call parameter from client " +
- getHostAddress();
+ InetSocketAddress address = getListenerAddress();
+ String msg = (address != null ? address : "(channel closed)") +
+ " is unable to read call parameter from client " + getHostAddress();
LOG.warn(msg, t);
metrics.exception(t);
@@ -2266,11 +2268,16 @@ public class RpcServer implements RpcServerInterface {
}
/**
- * Return the socket (ip+port) on which the RPC server is listening to.
- * @return the socket (ip+port) on which the RPC server is listening to.
+ * Return the socket (ip+port) on which the RPC server is listening. May return null if
+ * the listener channel is closed.
+ * @return the socket (ip+port) on which the RPC server is listening, or null if this
+ * information cannot be determined
*/
@Override
public synchronized InetSocketAddress getListenerAddress() {
+ if (listener == null) {
+ return null;
+ }
return listener.getAddress();
}
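
The root cause lives in the getter itself: once the listener is torn down there is
no address to return, so the method is now explicitly nullable and guards the
delegate before dereferencing it. A condensed sketch of the revised contract
(Listener is the HBase-internal type seen in the diff):

  private Listener listener; // may be null once the channel is torn down

  /** @return the listening socket, or null if the listener channel is closed */
  public synchronized InetSocketAddress getListenerAddress() {
    return listener == null ? null : listener.getAddress();
  }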
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 38288ef..28bf069 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -972,8 +972,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA,
DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA);
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
// Set our address, however we need the final port that was given to rpcServer
- isa = new InetSocketAddress(initialIsa.getHostName(), rpcServer.getListenerAddress().getPort());
+ isa = new InetSocketAddress(initialIsa.getHostName(), address.getPort());
rpcServer.setErrorHandler(this);
rs.setName(name);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index d427419..dffd8e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -159,10 +159,13 @@ public abstract class AbstractTestIPC {
TestRpcServer rpcServer = new TestRpcServer();
try {
rpcServer.start();
- InetSocketAddress address = rpcServer.getListenerAddress();
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
final String message = "hello";
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build();
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
Pair<Message, CellScanner> r =
client.call(null, md, param, md.getOutputType().toProto(), User.getCurrent(), address,
new MetricsConnection.CallStats());
@@ -200,12 +203,14 @@ public abstract class AbstractTestIPC {
TestRpcServer rpcServer = new TestRpcServer();
try {
rpcServer.start();
- InetSocketAddress address = rpcServer.getListenerAddress();
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
-
PayloadCarryingRpcController pcrc =
new PayloadCarryingRpcController(CellUtil.createCellScanner(cells));
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
Pair<Message, CellScanner> r =
client.call(pcrc, md, param, md.getOutputType().toProto(), User.getCurrent(), address,
new MetricsConnection.CallStats());
@@ -231,9 +236,12 @@ public abstract class AbstractTestIPC {
AbstractRpcClient client = createRpcClientRTEDuringConnectionSetup(conf);
try {
rpcServer.start();
- InetSocketAddress address = rpcServer.getListenerAddress();
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
client.call(null, md, param, null, User.getCurrent(), address,
new MetricsConnection.CallStats());
fail("Expected an exception to have been thrown!");
@@ -258,10 +266,14 @@ public abstract class AbstractTestIPC {
verify(scheduler).start();
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
for (int i = 0; i < 10; i++) {
client.call(new PayloadCarryingRpcController(
CellUtil.createCellScanner(ImmutableList.<Cell> of(CELL))), md, param,
- md.getOutputType().toProto(), User.getCurrent(), rpcServer.getListenerAddress(),
+ md.getOutputType().toProto(), User.getCurrent(), address,
new MetricsConnection.CallStats());
}
verify(scheduler, times(10)).dispatch((CallRunner) anyObject());
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java
index d761ae9..b9d390a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java
@@ -157,6 +157,9 @@ public class TestAsyncIPC extends AbstractTestIPC {
try {
rpcServer.start();
InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
@@ -193,6 +196,9 @@ public class TestAsyncIPC extends AbstractTestIPC {
try {
rpcServer.start();
InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
@@ -258,6 +264,9 @@ public class TestAsyncIPC extends AbstractTestIPC {
try {
rpcServer.start();
InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
long startTime = System.currentTimeMillis();
User user = User.getCurrent();
for (int i = 0; i < cycles; i++) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java
index 961001f..d379722 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java
@@ -92,9 +92,12 @@ public class TestDelayedRpc {
RpcClient rpcClient = RpcClientFactory.createClient(
conf, HConstants.DEFAULT_CLUSTER_ID.toString());
try {
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(
- ServerName.valueOf(rpcServer.getListenerAddress().getHostName(),
- rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()),
+ ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()),
User.getCurrent(), RPC_CLIENT_TIMEOUT);
TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub =
TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel);
@@ -174,9 +177,12 @@ public class TestDelayedRpc {
RpcClient rpcClient = RpcClientFactory.createClient(
conf, HConstants.DEFAULT_CLUSTER_ID.toString());
try {
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(
- ServerName.valueOf(rpcServer.getListenerAddress().getHostName(),
- rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()),
+ ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()),
User.getCurrent(), RPC_CLIENT_TIMEOUT);
TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub =
TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel);
@@ -298,9 +304,12 @@ public class TestDelayedRpc {
RpcClient rpcClient = RpcClientFactory.createClient(
conf, HConstants.DEFAULT_CLUSTER_ID.toString());
try {
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(
- ServerName.valueOf(rpcServer.getListenerAddress().getHostName(),
- rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()),
+ ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()),
User.getCurrent(), 1000);
TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub =
TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel);
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
index d1b8202..3fc1259 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
@@ -122,9 +122,12 @@ public class TestIPC extends AbstractTestIPC {
rm.add(p);
try {
rpcServer.start();
- InetSocketAddress address = rpcServer.getListenerAddress();
long startTime = System.currentTimeMillis();
User user = User.getCurrent();
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
for (int i = 0; i < cycles; i++) {
List<CellScannable> cells = new ArrayList<CellScannable>();
// Message param = RequestConverter.buildMultiRequest(HConstants.EMPTY_BYTE_ARRAY, rm);
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
index cee459f..81869b4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
@@ -101,7 +101,11 @@ public class TestProtoBufRpc {
Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)),
new InetSocketAddress(ADDRESS, PORT), conf,
new FifoRpcScheduler(conf, 10));
- this.isa = server.getListenerAddress();
+ InetSocketAddress address = server.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
+ this.isa = address;
this.server.start();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
index a4e55d9..a37ba11 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
@@ -180,9 +180,12 @@ public class TestRpcHandlerException {
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build();
PayloadCarryingRpcController controller =
new PayloadCarryingRpcController(CellUtil.createCellScanner(ImmutableList.of(CELL)));
-
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
client.call(controller, md, param, md.getOutputType().toProto(), User.getCurrent(),
- rpcServer.getListenerAddress(), new MetricsConnection.CallStats());
+ address, new MetricsConnection.CallStats());
} catch (Throwable e) {
assert(abortable.isAborted() == true);
} finally {
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
index 97e69b7..a3ac177 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -115,13 +116,16 @@ public class TestRSKilledWhenInitializing {
@Override
protected void handleReportForDutyResponse(RegionServerStartupResponse c) throws IOException {
if (firstRS.getAndSet(false)) {
+ InetSocketAddress address = super.getRpcServer().getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
for (NameStringPair e : c.getMapEntriesList()) {
String key = e.getName();
// The hostname the master sees us as.
if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) {
String hostnameFromMasterPOV = e.getValue();
- assertEquals(super.getRpcServer().getListenerAddress().getHostName(),
- hostnameFromMasterPOV);
+ assertEquals(address.getHostName(), hostnameFromMasterPOV);
}
}
while (!masterActive) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java
index 8eff063..66b8c75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java
@@ -143,11 +143,14 @@ public class TestSecureRPC {
RpcClient rpcClient =
RpcClientFactory.createClient(conf, HConstants.DEFAULT_CLUSTER_ID.toString());
try {
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
BlockingRpcChannel channel =
rpcClient.createBlockingRpcChannel(
- ServerName.valueOf(rpcServer.getListenerAddress().getHostName(), rpcServer
- .getListenerAddress().getPort(), System.currentTimeMillis()), User.getCurrent(),
- 5000);
+ ServerName.valueOf(address.getHostName(), address.getPort(),
+ System.currentTimeMillis()), User.getCurrent(), 5000);
TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub =
TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel);
List<Integer> results = new ArrayList<Integer>();
http://git-wip-us.apache.org/repos/asf/hbase/blob/efb82957/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index c83e502..69c6e63 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -141,7 +141,11 @@ public class TestTokenAuthentication {
AuthenticationProtos.AuthenticationService.BlockingInterface.class));
this.rpcServer =
new RpcServer(this, "tokenServer", sai, initialIsa, conf, new FifoRpcScheduler(conf, 1));
- this.isa = this.rpcServer.getListenerAddress();
+ InetSocketAddress address = rpcServer.getListenerAddress();
+ if (address == null) {
+ throw new IOException("Listener channel is closed");
+ }
+ this.isa = address;
this.sleeper = new Sleeper(1000, this);
}
[04/17] hbase git commit: HBASE-14661 RegionServer link is not opening, in HBase Table page (Y. SREENIVASULU REDDY)
Posted by sy...@apache.org.
HBASE-14661 RegionServer link is not opening, in HBase Table page (Y. SREENIVASULU REDDY)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/928dade1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/928dade1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/928dade1
Branch: refs/heads/hbase-12439
Commit: 928dade1da5d83df2fc3d9b84160da0eec4efee2
Parents: d0944f8
Author: stack <st...@apache.org>
Authored: Mon Oct 26 14:57:28 2015 -0700
Committer: stack <st...@apache.org>
Committed: Mon Oct 26 14:57:28 2015 -0700
----------------------------------------------------------------------
hbase-server/src/main/resources/hbase-webapps/master/table.jsp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/928dade1/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 4d85eea..f132b2b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -313,7 +313,7 @@ if ( fqtn != null ) {
conf))) %></td>
<%
if (addr != null) {
- String url = "//" + addr.getHostname() + ":" + master.getRegionServerInfoPort(addr) + "/";
+ String url = "//" + addr.getHostname() + ":" + master.getRegionServerInfoPort(addr) + "/rs-status";
%>
<td>
<a href="<%= url %>"><%= addr.getHostname().toString() + ":" + addr.getPort() %></a>
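
The fix points the link at the region server's status page rather than at the
info-server root, which evidently no longer resolved to the intended page. The
corrected URL construction, reduced to its essentials (variable names as in the JSP):

  String url = "//" + addr.getHostname() + ":" + master.getRegionServerInfoPort(addr)
      + "/rs-status";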
[03/17] hbase git commit: HBASE-14698 Set category timeouts on TestScanner and TestNamespaceAuditor; ADDENDUM
Posted by sy...@apache.org.
HBASE-14698 Set category timeouts on TestScanner and TestNamespaceAuditor; ADDENDUM
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d0944f82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d0944f82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d0944f82
Branch: refs/heads/hbase-12439
Commit: d0944f8219c4258f948e7470419ae683cc4194db
Parents: 0f6ec61
Author: stack <st...@apache.org>
Authored: Mon Oct 26 13:28:41 2015 -0700
Committer: stack <st...@apache.org>
Committed: Mon Oct 26 13:28:41 2015 -0700
----------------------------------------------------------------------
.../apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java | 3 +++
1 file changed, 3 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d0944f82/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index 9863fac..64c74e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -34,6 +34,9 @@ import java.util.Random;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.KeyValueUtil;
[11/17] hbase git commit: HBASE-14694 Scan copy constructor doesn't handle allowPartialResults
Posted by sy...@apache.org.
HBASE-14694 Scan copy constructor doesn't handle allowPartialResults
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a321da20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a321da20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a321da20
Branch: refs/heads/hbase-12439
Commit: a321da2039a50cdb8e1b8588b4faa6f1689e2ab3
Parents: 4269c7f
Author: tedyu <yu...@gmail.com>
Authored: Tue Oct 27 07:51:04 2015 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue Oct 27 07:51:04 2015 -0700
----------------------------------------------------------------------
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/a321da20/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 9d46bc7..3e68158 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -230,6 +230,7 @@ public class Scan extends Query {
reversed = scan.isReversed();
asyncPrefetch = scan.isAsyncPrefetch();
small = scan.isSmall();
+ allowPartialResults = scan.getAllowPartialResults();
TimeRange ctr = scan.getTimeRange();
tr = new TimeRange(ctr.getMin(), ctr.getMax());
Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
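
Copy constructors silently drop fields added after they were written; Scan's had
fallen out of sync with the allowPartialResults flag. A minimal regression check for
this class of bug, using Scan's real accessors:

  Scan scan = new Scan();
  scan.setAllowPartialResults(true);
  Scan copy = new Scan(scan);
  // Fails before this fix: the flag was lost in the copy.
  Assert.assertTrue(copy.getAllowPartialResults());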
[09/17] hbase git commit: HBASE-14702 TestZKProcedureControllers.testZKCoordinatorControllerWithSingleMemberCohort is a flakey
Posted by sy...@apache.org.
HBASE-14702 TestZKProcedureControllers.testZKCoordinatorControllerWithSingleMemberCohort is a flakey
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac746e09
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac746e09
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac746e09
Branch: refs/heads/hbase-12439
Commit: ac746e09056192e4957766f06b4f94d7c5c34d18
Parents: 496d20c
Author: stack <st...@apache.org>
Authored: Mon Oct 26 22:35:22 2015 -0700
Committer: stack <st...@apache.org>
Committed: Mon Oct 26 22:35:22 2015 -0700
----------------------------------------------------------------------
.../apache/hadoop/hbase/procedure/TestZKProcedureControllers.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/ac746e09/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
index a3e91ab..f639271 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
@@ -362,7 +362,8 @@ public class TestZKProcedureControllers {
String operationName, byte[] data) {
// verify(member, Mockito.times(cohortSize)).submitSubprocedure(Mockito.eq(operationName),
// (byte[]) Mockito.argThat(new ArrayEquals(data)));
- verify(member, Mockito.times(cohortSize)).submitSubprocedure(Mockito.any(Subprocedure.class));
+ Mockito.verify(member,
+ Mockito.atLeast(cohortSize)).submitSubprocedure(Mockito.any(Subprocedure.class));
}
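
times(n) asserts an exact invocation count, which flakes whenever extra invocations
can legitimately arrive before verification runs; atLeast(n) asserts only the lower
bound the test actually cares about. The difference in one line each:

  // Brittle: fails on any benign extra invocation.
  Mockito.verify(member, Mockito.times(cohortSize)).submitSubprocedure(Mockito.any(Subprocedure.class));
  // Robust: at least cohortSize invocations must have happened.
  Mockito.verify(member, Mockito.atLeast(cohortSize)).submitSubprocedure(Mockito.any(Subprocedure.class));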
[14/17] hbase git commit: HBASE-14688 Cleanup MOB tests
Posted by sy...@apache.org.
HBASE-14688 Cleanup MOB tests
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c91bfff5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c91bfff5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c91bfff5
Branch: refs/heads/hbase-12439
Commit: c91bfff5862fd38b3d301e3371d2f643d0c501ea
Parents: e04e740
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Tue Oct 27 10:28:07 2015 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Tue Oct 27 11:37:39 2015 -0700
----------------------------------------------------------------------
.../hadoop/hbase/HBaseTestingUtility.java | 7 +-
.../apache/hadoop/hbase/mob/MobTestUtil.java | 42 +++--
.../hadoop/hbase/mob/TestCachedMobFile.java | 31 ++--
.../hbase/mob/TestDefaultMobStoreFlusher.java | 181 ++++++-------------
.../hbase/mob/TestExpiredMobFileCleaner.java | 8 +-
.../hbase/mob/TestMobDataBlockEncoding.java | 19 +-
.../apache/hadoop/hbase/mob/TestMobFile.java | 16 +-
.../hbase/mob/compactions/TestMobCompactor.java | 28 +--
.../TestPartitionedMobCompactor.java | 95 +++-------
.../hbase/regionserver/TestDeleteMobTable.java | 155 +++++++---------
.../regionserver/TestMobStoreCompaction.java | 31 ++--
.../hbase/regionserver/TestMobStoreScanner.java | 114 ++++--------
.../hadoop/hbase/util/BaseTestHBaseFsck.java | 5 +-
13 files changed, 265 insertions(+), 467 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ab7b51c..eb1494e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1845,7 +1845,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
//
// ==========================================================================
-
+
/**
* Provide an existing table name to truncate.
* Scans the table and issues a delete for each row read.
@@ -2141,7 +2141,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* Return the number of rows in the given table.
*/
public int countRows(final Table table) throws IOException {
- Scan scan = new Scan();
+ return countRows(table, new Scan());
+ }
+
+ public int countRows(final Table table, final Scan scan) throws IOException {
ResultScanner results = table.getScanner(scan);
int count = 0;
for (@SuppressWarnings("unused") Result res : results) {
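
The new overload lets a test count rows under an arbitrary Scan, while the
no-argument form keeps its old behaviour by delegating. Typical use from the MOB
compactor tests further down in this patch:

  Scan scan = new Scan();
  // Do not resolve MOB references; count the raw reference cells themselves.
  scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
  int rows = TEST_UTIL.countRows(table, scan);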
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
index bc3354b..3e0e2d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
@@ -19,11 +19,16 @@
package org.apache.hadoop.hbase.mob;
import java.io.IOException;
+import java.util.List;
import java.util.Random;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
@@ -73,15 +78,30 @@ public class MobTestUtil {
/**
* Compare two Cells only for their row family qualifier value
*/
- public static void assertCellEquals(Cell firstKeyValue,
- Cell secondKeyValue) {
- Assert.assertEquals(Bytes.toString(CellUtil.cloneRow(firstKeyValue)),
- Bytes.toString(CellUtil.cloneRow(secondKeyValue)));
- Assert.assertEquals(Bytes.toString(CellUtil.cloneFamily(firstKeyValue)),
- Bytes.toString(CellUtil.cloneFamily(secondKeyValue)));
- Assert.assertEquals(Bytes.toString(CellUtil.cloneQualifier(firstKeyValue)),
- Bytes.toString(CellUtil.cloneQualifier(secondKeyValue)));
- Assert.assertEquals(Bytes.toString(CellUtil.cloneValue(firstKeyValue)),
- Bytes.toString(CellUtil.cloneValue(secondKeyValue)));
- }
+ public static void assertCellEquals(Cell firstKeyValue, Cell secondKeyValue) {
+ Assert.assertArrayEquals(CellUtil.cloneRow(firstKeyValue),
+ CellUtil.cloneRow(secondKeyValue));
+ Assert.assertArrayEquals(CellUtil.cloneFamily(firstKeyValue),
+ CellUtil.cloneFamily(secondKeyValue));
+ Assert.assertArrayEquals(CellUtil.cloneQualifier(firstKeyValue),
+ CellUtil.cloneQualifier(secondKeyValue));
+ Assert.assertArrayEquals(CellUtil.cloneValue(firstKeyValue),
+ CellUtil.cloneValue(secondKeyValue));
+ }
+
+ public static void assertCellsValue(Table table, Scan scan,
+ byte[] expectedValue, int expectedCount) throws IOException {
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ Assert.assertArrayEquals(expectedValue, CellUtil.cloneValue(cell));
+ count++;
+ }
+ }
+ results.close();
+ Assert.assertEquals(expectedCount, count);
+ }
}
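
assertCellsValue() collapses the scanner/loop/close/assert boilerplate that several
MOB tests had copied verbatim. As the TestMobDataBlockEncoding hunk below shows,
a full scan-and-verify pass shrinks to:

  Scan scan = new Scan();
  scan.setMaxVersions(4);
  MobTestUtil.assertCellsValue(table, scan, value, 3);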
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
index b38e7cb..4bb525d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -43,10 +44,9 @@ import org.junit.experimental.categories.Category;
@Category(SmallTests.class)
public class TestCachedMobFile extends TestCase{
static final Log LOG = LogFactory.getLog(TestCachedMobFile.class);
- private Configuration conf = HBaseConfiguration.create();
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private Configuration conf = TEST_UTIL.getConfiguration();
private CacheConfig cacheConf = new CacheConfig(conf);
- private static final String TABLE = "tableName";
- private static final String FAMILY = "familyName";
private static final String FAMILY1 = "familyName1";
private static final String FAMILY2 = "familyName2";
private static final long EXPECTED_REFERENCE_ZERO = 0;
@@ -56,13 +56,11 @@ public class TestCachedMobFile extends TestCase{
@Test
public void testOpenClose() throws Exception {
String caseName = getName();
- FileSystem fs = FileSystem.get(conf);
- Path testDir = FSUtils.getRootDir(conf);
- Path outputDir = new Path(new Path(testDir, TABLE),
- FAMILY);
+ Path testDir = TEST_UTIL.getDataTestDir();
+ FileSystem fs = testDir.getFileSystem(conf);
HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
- .withOutputDir(outputDir).withFileContext(meta).build();
+ .withOutputDir(testDir).withFileContext(meta).build();
MobTestUtil.writeStoreFile(writer, caseName);
CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
@@ -79,17 +77,15 @@ public class TestCachedMobFile extends TestCase{
@Test
public void testCompare() throws Exception {
String caseName = getName();
- FileSystem fs = FileSystem.get(conf);
- Path testDir = FSUtils.getRootDir(conf);
- Path outputDir1 = new Path(new Path(testDir, TABLE),
- FAMILY1);
+ Path testDir = TEST_UTIL.getDataTestDir();
+ FileSystem fs = testDir.getFileSystem(conf);
+ Path outputDir1 = new Path(testDir, FAMILY1);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
StoreFile.Writer writer1 = new StoreFile.WriterBuilder(conf, cacheConf, fs)
.withOutputDir(outputDir1).withFileContext(meta).build();
MobTestUtil.writeStoreFile(writer1, caseName);
CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf);
- Path outputDir2 = new Path(new Path(testDir, TABLE),
- FAMILY2);
+ Path outputDir2 = new Path(testDir, FAMILY2);
StoreFile.Writer writer2 = new StoreFile.WriterBuilder(conf, cacheConf, fs)
.withOutputDir(outputDir2)
.withFileContext(meta)
@@ -105,12 +101,11 @@ public class TestCachedMobFile extends TestCase{
@Test
public void testReadKeyValue() throws Exception {
- FileSystem fs = FileSystem.get(conf);
- Path testDir = FSUtils.getRootDir(conf);
- Path outputDir = new Path(new Path(testDir, TABLE), "familyname");
+ Path testDir = TEST_UTIL.getDataTestDir();
+ FileSystem fs = testDir.getFileSystem(conf);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
- .withOutputDir(outputDir).withFileContext(meta).build();
+ .withOutputDir(testDir).withFileContext(meta).build();
String caseName = getName();
MobTestUtil.writeStoreFile(writer, caseName);
CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
index b91d4d1..94a2ed4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
@@ -18,7 +18,6 @@
*/
package org.apache.hadoop.hbase.mob;
-import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
@@ -26,9 +25,7 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -64,127 +61,67 @@ public class TestDefaultMobStoreFlusher {
}
@Test
- public void testFlushNonMobFile() throws InterruptedException {
- String TN = "testFlushNonMobFile";
- TableName tn = TableName.valueOf(TN);
- Table table = null;
- HBaseAdmin admin = null;
-
- try {
- HTableDescriptor desc = new HTableDescriptor(tn);
- HColumnDescriptor hcd = new HColumnDescriptor(family);
- hcd.setMaxVersions(4);
- desc.addFamily(hcd);
-
- admin = TEST_UTIL.getHBaseAdmin();
- admin.createTable(desc);
- table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
- .getTable(TableName.valueOf(TN));
-
- //Put data
- Put put0 = new Put(row1);
- put0.addColumn(family, qf1, 1, value1);
- table.put(put0);
-
- //Put more data
- Put put1 = new Put(row2);
- put1.addColumn(family, qf2, 1, value2);
- table.put(put1);
-
- //Flush
- admin.flush(tn);
-
- Scan scan = new Scan();
- scan.addColumn(family, qf1);
- scan.setMaxVersions(4);
- ResultScanner scanner = table.getScanner(scan);
-
- //Compare
- Result result = scanner.next();
- int size = 0;
- while (result != null) {
- size++;
- List<Cell> cells = result.getColumnCells(family, qf1);
- // Verify the cell size
- Assert.assertEquals(1, cells.size());
- // Verify the value
- Assert.assertEquals(Bytes.toString(value1),
- Bytes.toString(CellUtil.cloneValue(cells.get(0))));
- result = scanner.next();
- }
- scanner.close();
- Assert.assertEquals(1, size);
- admin.close();
- } catch (MasterNotRunningException e1) {
- e1.printStackTrace();
- } catch (ZooKeeperConnectionException e2) {
- e2.printStackTrace();
- } catch (IOException e3) {
- e3.printStackTrace();
- }
+ public void testFlushNonMobFile() throws Exception {
+ TableName tn = TableName.valueOf("testFlushNonMobFile");
+ HTableDescriptor desc = new HTableDescriptor(tn);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
+ testFlushFile(desc);
}
@Test
- public void testFlushMobFile() throws InterruptedException {
- String TN = "testFlushMobFile";
- TableName tn = TableName.valueOf(TN);
- Table table = null;
- Admin admin = null;
-
- try {
- HTableDescriptor desc = new HTableDescriptor(tn);
- HColumnDescriptor hcd = new HColumnDescriptor(family);
- hcd.setMobEnabled(true);
- hcd.setMobThreshold(3L);
- hcd.setMaxVersions(4);
- desc.addFamily(hcd);
-
- Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
- admin = c.getAdmin();
- admin.createTable(desc);
- table = c.getTable(TableName.valueOf(TN));
-
- //put data
- Put put0 = new Put(row1);
- put0.addColumn(family, qf1, 1, value1);
- table.put(put0);
-
- //put more data
- Put put1 = new Put(row2);
- put1.addColumn(family, qf2, 1, value2);
- table.put(put1);
-
- //flush
- admin.flush(tn);
-
- //Scan
- Scan scan = new Scan();
- scan.addColumn(family, qf1);
- scan.setMaxVersions(4);
- ResultScanner scanner = table.getScanner(scan);
-
- //Compare
- Result result = scanner.next();
- int size = 0;
- while (result != null) {
- size++;
- List<Cell> cells = result.getColumnCells(family, qf1);
- // Verify the the cell size
- Assert.assertEquals(1, cells.size());
- // Verify the value
- Assert.assertEquals(Bytes.toString(value1),
- Bytes.toString(CellUtil.cloneValue(cells.get(0))));
- result = scanner.next();
- }
- scanner.close();
- Assert.assertEquals(1, size);
- admin.close();
- } catch (MasterNotRunningException e1) {
- e1.printStackTrace();
- } catch (ZooKeeperConnectionException e2) {
- e2.printStackTrace();
- } catch (IOException e3) {
- e3.printStackTrace();
- }
+ public void testFlushMobFile() throws Exception {
+ TableName tn = TableName.valueOf("testFlushMobFile");
+ HTableDescriptor desc = new HTableDescriptor(tn);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
+ testFlushFile(desc);
}
+
+ private void testFlushFile(HTableDescriptor htd) throws Exception {
+ Table table = null;
+ try {
+ table = TEST_UTIL.createTable(htd, null);
+
+ //put data
+ Put put0 = new Put(row1);
+ put0.addColumn(family, qf1, 1, value1);
+ table.put(put0);
+
+ //put more data
+ Put put1 = new Put(row2);
+ put1.addColumn(family, qf2, 1, value2);
+ table.put(put1);
+
+ //flush
+ TEST_UTIL.flush(htd.getTableName());
+
+ //Scan
+ Scan scan = new Scan();
+ scan.addColumn(family, qf1);
+ scan.setMaxVersions(4);
+ ResultScanner scanner = table.getScanner(scan);
+
+ //Compare
+ int size = 0;
+ for (Result result: scanner) {
+ size++;
+ List<Cell> cells = result.getColumnCells(family, qf1);
+ // Verify the cell size
+ Assert.assertEquals(1, cells.size());
+ // Verify the value
+ Assert.assertArrayEquals(value1, CellUtil.cloneValue(cells.get(0)));
+ }
+ scanner.close();
+ Assert.assertEquals(1, size);
+ } finally {
+ table.close();
+ }
+ }
}
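
Besides folding the two flows into a shared testFlushFile(HTableDescriptor) helper,
the rewrite removes catch blocks that merely printed stack traces, a pattern under
which a broken flush still passed. Schematically:

  // Before: a failed flush was swallowed and the test "passed".
  } catch (IOException e) {
    e.printStackTrace();
  }
  // After: the test declares throws Exception, so JUnit reports the failure.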
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
index dfaeca6..fff6f44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
import java.util.Random;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -124,7 +123,7 @@ public class TestExpiredMobFileCleaner {
public void testCleaner() throws Exception {
init();
- Path mobDirPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
+ Path mobDirPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
byte[] dummyData = makeDummyData(600);
long ts = System.currentTimeMillis() - 3 * secondsOfDay() * 1000; // 3 days before
@@ -158,11 +157,6 @@ public class TestExpiredMobFileCleaner {
assertEquals("After cleanup without delay 2", secondFile, lastFile);
}
- private Path getMobFamilyPath(Configuration conf, TableName tableName, String familyName) {
- Path p = new Path(MobUtils.getMobRegionPath(conf, tableName), familyName);
- return p;
- }
-
private int secondsOfDay() {
return 24 * 3600;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
index 15aa7d4..8ad6f95 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
@@ -18,11 +18,8 @@
*/
package org.apache.hadoop.hbase.mob;
-import java.util.List;
import java.util.Random;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -32,7 +29,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
-import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -117,19 +113,6 @@ public class TestMobDataBlockEncoding {
Scan scan = new Scan();
scan.setMaxVersions(4);
-
- ResultScanner results = table.getScanner(scan);
- int count = 0;
- for (Result res : results) {
- List<Cell> cells = res.listCells();
- for(Cell cell : cells) {
- // Verify the value
- Assert.assertEquals(Bytes.toString(value),
- Bytes.toString(CellUtil.cloneValue(cell)));
- count++;
- }
- }
- results.close();
- Assert.assertEquals(3, count);
+ MobTestUtil.assertCellsValue(table, scan, value, 3);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
index d05da24..e2dced9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
@@ -47,17 +47,14 @@ public class TestMobFile extends TestCase {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Configuration conf = TEST_UTIL.getConfiguration();
private CacheConfig cacheConf = new CacheConfig(conf);
- private final String TABLE = "tableName";
- private final String FAMILY = "familyName";
@Test
public void testReadKeyValue() throws Exception {
- FileSystem fs = FileSystem.get(conf);
- Path testDir = FSUtils.getRootDir(conf);
- Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
+ Path testDir = TEST_UTIL.getDataTestDir();
+ FileSystem fs = testDir.getFileSystem(conf);
HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
- .withOutputDir(outputDir)
+ .withOutputDir(testDir)
.withFileContext(meta)
.build();
String caseName = getName();
@@ -106,12 +103,11 @@ public class TestMobFile extends TestCase {
@Test
public void testGetScanner() throws Exception {
- FileSystem fs = FileSystem.get(conf);
- Path testDir = FSUtils.getRootDir(conf);
- Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
+ Path testDir = TEST_UTIL.getDataTestDir();
+ FileSystem fs = testDir.getFileSystem(conf);
HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
- .withOutputDir(outputDir)
+ .withOutputDir(testDir)
.withFileContext(meta)
.build();
MobTestUtil.writeStoreFile(writer, getName());
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index 6da7655..9922aff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -413,8 +413,8 @@ public class TestMobCompactor {
result = table.get(get);
cell = result.getColumnLatestCell(hcd1.getName(), Bytes.toBytes(qf1));
// the ref name is the new file
- Path mobFamilyPath = new Path(
- MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(), tableName), hcd1.getNameAsString());
+ Path mobFamilyPath =
+ MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, hcd1.getNameAsString());
List<Path> paths = new ArrayList<Path>();
if (fs.exists(mobFamilyPath)) {
FileStatus[] files = fs.listStatus(mobFamilyPath);
@@ -495,13 +495,7 @@ public class TestMobCompactor {
Scan scan = new Scan();
// Do not retrieve the mob data when scanning
scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
- ResultScanner results = table.getScanner(scan);
- int count = 0;
- for (Result res : results) {
- count++;
- }
- results.close();
- return count;
+ return TEST_UTIL.countRows(table, scan);
}
/**
@@ -532,8 +526,7 @@ public class TestMobCompactor {
*/
private int countFiles(TableName tableName, boolean isMobFile, String familyName)
throws IOException {
- Path mobDirPath = MobUtils.getMobFamilyPath(
- MobUtils.getMobRegionPath(conf, tableName), familyName);
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
int count = 0;
if (fs.exists(mobDirPath)) {
FileStatus[] files = fs.listStatus(mobDirPath);
@@ -553,8 +546,7 @@ public class TestMobCompactor {
}
private boolean verifyEncryption(TableName tableName, String familyName) throws IOException {
- Path mobDirPath = MobUtils.getMobFamilyPath(MobUtils.getMobRegionPath(conf, tableName),
- familyName);
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
boolean hasFiles = false;
if (fs.exists(mobDirPath)) {
FileStatus[] files = fs.listStatus(mobDirPath);
@@ -579,8 +571,7 @@ public class TestMobCompactor {
* @return the number of the HFileLink
*/
private int countHFileLinks(String familyName) throws IOException {
- Path mobDirPath = MobUtils.getMobFamilyPath(
- MobUtils.getMobRegionPath(conf, tableName), familyName);
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
int count = 0;
if (fs.exists(mobDirPath)) {
FileStatus[] files = fs.listStatus(mobDirPath);
@@ -601,8 +592,7 @@ public class TestMobCompactor {
* @return the number of files large than the size
*/
private int countLargeFiles(int size, TableName tableName, String familyName) throws IOException {
- Path mobDirPath = MobUtils.getMobFamilyPath(MobUtils.getMobRegionPath(conf, tableName),
- familyName);
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
int count = 0;
if (fs.exists(mobDirPath)) {
FileStatus[] files = fs.listStatus(mobDirPath);
@@ -729,8 +719,8 @@ public class TestMobCompactor {
// Do not retrieve the mob data when scanning
scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
ResultScanner results = table.getScanner(scan);
- Path mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(),
- tableName), familyName);
+ Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(),
+ tableName, familyName);
List<Path> actualFilePaths = new ArrayList<>();
List<Path> expectFilePaths = new ArrayList<>();
for (Result res : results) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
index aba63c6..6774ac3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
@@ -104,56 +104,27 @@ public class TestPartitionedMobCompactor {
@Test
public void testCompactionSelectWithAllFiles() throws Exception {
- resetConf();
String tableName = "testCompactionSelectWithAllFiles";
- init(tableName);
- int count = 10;
- // create 10 mob files.
- createStoreFiles(basePath, family, qf, count, Type.Put);
- // create 10 del files
- createStoreFiles(basePath, family, qf, count, Type.Delete);
- listFiles();
- long mergeSize = MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD;
- List<String> expectedStartKeys = new ArrayList<>();
- for(FileStatus file : mobFiles) {
- if(file.getLen() < mergeSize) {
- String fileName = file.getPath().getName();
- String startKey = fileName.substring(0, 32);
- expectedStartKeys.add(startKey);
- }
- }
- testSelectFiles(tableName, CompactionType.ALL_FILES, false, expectedStartKeys);
+ testCompactionAtMergeSize(tableName, MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD,
+ CompactionType.ALL_FILES, false);
}
@Test
public void testCompactionSelectWithPartFiles() throws Exception {
- resetConf();
String tableName = "testCompactionSelectWithPartFiles";
- init(tableName);
- int count = 10;
- // create 10 mob files.
- createStoreFiles(basePath, family, qf, count, Type.Put);
- // create 10 del files
- createStoreFiles(basePath, family, qf, count, Type.Delete);
- listFiles();
- long mergeSize = 4000;
- List<String> expectedStartKeys = new ArrayList<>();
- for(FileStatus file : mobFiles) {
- if(file.getLen() < 4000) {
- String fileName = file.getPath().getName();
- String startKey = fileName.substring(0, 32);
- expectedStartKeys.add(startKey);
- }
- }
- // set the mob compaction mergeable threshold
- conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
- testSelectFiles(tableName, CompactionType.PART_FILES, false, expectedStartKeys);
+ testCompactionAtMergeSize(tableName, 4000, CompactionType.PART_FILES, false);
}
@Test
public void testCompactionSelectWithForceAllFiles() throws Exception {
- resetConf();
String tableName = "testCompactionSelectWithForceAllFiles";
+ testCompactionAtMergeSize(tableName, Long.MAX_VALUE, CompactionType.ALL_FILES, true);
+ }
+
+ private void testCompactionAtMergeSize(final String tableName,
+ final long mergeSize, final CompactionType type, final boolean isForceAllFiles)
+ throws Exception {
+ resetConf();
init(tableName);
int count = 10;
// create 10 mob files.
@@ -161,51 +132,41 @@ public class TestPartitionedMobCompactor {
// create 10 del files
createStoreFiles(basePath, family, qf, count, Type.Delete);
listFiles();
- long mergeSize = 4000;
List<String> expectedStartKeys = new ArrayList<>();
for(FileStatus file : mobFiles) {
- String fileName = file.getPath().getName();
- String startKey = fileName.substring(0, 32);
- expectedStartKeys.add(startKey);
+ if(file.getLen() < mergeSize) {
+ String fileName = file.getPath().getName();
+ String startKey = fileName.substring(0, 32);
+ expectedStartKeys.add(startKey);
+ }
}
// set the mob compaction mergeable threshold
conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
- testSelectFiles(tableName, CompactionType.ALL_FILES, true, expectedStartKeys);
+ testSelectFiles(tableName, type, isForceAllFiles, expectedStartKeys);
}
@Test
public void testCompactDelFilesWithDefaultBatchSize() throws Exception {
- resetConf();
String tableName = "testCompactDelFilesWithDefaultBatchSize";
- init(tableName);
- // create 20 mob files.
- createStoreFiles(basePath, family, qf, 20, Type.Put);
- // create 13 del files
- createStoreFiles(basePath, family, qf, 13, Type.Delete);
- listFiles();
- testCompactDelFiles(tableName, 1, 13, false);
+ testCompactDelFilesAtBatchSize(tableName, MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE,
+ MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
}
@Test
public void testCompactDelFilesWithSmallBatchSize() throws Exception {
- resetConf();
String tableName = "testCompactDelFilesWithSmallBatchSize";
- init(tableName);
- // create 20 mob files.
- createStoreFiles(basePath, family, qf, 20, Type.Put);
- // create 13 del files
- createStoreFiles(basePath, family, qf, 13, Type.Delete);
- listFiles();
-
- // set the mob compaction batch size
- conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, 4);
- testCompactDelFiles(tableName, 1, 13, false);
+ testCompactDelFilesAtBatchSize(tableName, 4, MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
}
@Test
public void testCompactDelFilesChangeMaxDelFileCount() throws Exception {
- resetConf();
String tableName = "testCompactDelFilesChangeMaxDelFileCount";
+ testCompactDelFilesAtBatchSize(tableName, 4, 2);
+ }
+
+ private void testCompactDelFilesAtBatchSize(String tableName, int batchSize,
+ int delfileMaxCount) throws Exception {
+ resetConf();
init(tableName);
// create 20 mob files.
createStoreFiles(basePath, family, qf, 20, Type.Put);
@@ -214,10 +175,10 @@ public class TestPartitionedMobCompactor {
listFiles();
// set the max del file count
- conf.setInt(MobConstants.MOB_DELFILE_MAX_COUNT, 5);
+ conf.setInt(MobConstants.MOB_DELFILE_MAX_COUNT, delfileMaxCount);
// set the mob compaction batch size
- conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, 2);
- testCompactDelFiles(tableName, 4, 13, false);
+ conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, batchSize);
+ testCompactDelFiles(tableName, 1, 13, false);
}
/**
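
The hunks above collapse three near-identical test bodies into the parameterized helpers testCompactionAtMergeSize and testCompactDelFilesAtBatchSize, so each @Test method shrinks to a single call. As a sketch, a hypothetical fourth selection case (a method that would live inside TestPartitionedMobCompactor; the 100-byte threshold is illustrative, not from the patch):

@Test
public void testCompactionSelectWithTinyMergeSize() throws Exception {
  // Only mob files smaller than 100 bytes should be selected for merging.
  testCompactionAtMergeSize("testCompactionSelectWithTinyMergeSize", 100,
    CompactionType.PART_FILES, false);
}
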
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
index 6312653..779fdca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
@@ -74,132 +73,110 @@ public class TestDeleteMobTable {
return mobVal;
}
- @Test
- public void testDeleteMobTable() throws Exception {
- byte[] tableName = Bytes.toBytes("testDeleteMobTable");
- TableName tn = TableName.valueOf(tableName);
- HTableDescriptor htd = new HTableDescriptor(tn);
+ private HTableDescriptor createTableDescriptor(TableName tableName, boolean hasMob) {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
- hcd.setMobEnabled(true);
- hcd.setMobThreshold(0);
+ if (hasMob) {
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0);
+ }
htd.addFamily(hcd);
- HBaseAdmin admin = null;
- Table table = null;
+ return htd;
+ }
+
+ private Table createTableWithOneFile(HTableDescriptor htd) throws IOException {
+ Table table = TEST_UTIL.createTable(htd, null);
try {
- admin = TEST_UTIL.getHBaseAdmin();
- admin.createTable(htd);
- table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn);
+ // insert data
byte[] value = generateMobValue(10);
-
byte[] row = Bytes.toBytes("row");
Put put = new Put(row);
put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
table.put(put);
- admin.flush(tn);
+ // create an hfile
+ TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
+ } catch (IOException e) {
+ table.close();
+ throw e;
+ }
+ return table;
+ }
+
+ @Test
+ public void testDeleteMobTable() throws Exception {
+ TableName tn = TableName.valueOf("testDeleteMobTable");
+ HTableDescriptor htd = createTableDescriptor(tn, true);
+ HColumnDescriptor hcd = htd.getFamily(FAMILY);
+ String fileName = null;
+ Table table = createTableWithOneFile(htd);
+ try {
// the mob file exists
Assert.assertEquals(1, countMobFiles(tn, hcd.getNameAsString()));
Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
- String fileName = assertHasOneMobRow(table, tn, hcd.getNameAsString());
+ fileName = assertHasOneMobRow(table, tn, hcd.getNameAsString());
Assert.assertFalse(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
Assert.assertTrue(mobTableDirExist(tn));
- table.close();
-
- admin.disableTable(tn);
- admin.deleteTable(tn);
-
- Assert.assertFalse(admin.tableExists(tn));
- Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
- Assert.assertEquals(1, countArchiveMobFiles(tn, hcd.getNameAsString()));
- Assert.assertTrue(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
- Assert.assertFalse(mobTableDirExist(tn));
} finally {
- if (admin != null) {
- admin.close();
- }
+ table.close();
+ TEST_UTIL.deleteTable(tn);
}
+
+ Assert.assertFalse(TEST_UTIL.getHBaseAdmin().tableExists(tn));
+ Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertEquals(1, countArchiveMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertTrue(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
+ Assert.assertFalse(mobTableDirExist(tn));
}
@Test
public void testDeleteNonMobTable() throws Exception {
- byte[] tableName = Bytes.toBytes("testDeleteNonMobTable");
- TableName tn = TableName.valueOf(tableName);
- HTableDescriptor htd = new HTableDescriptor(tn);
- HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
- htd.addFamily(hcd);
- HBaseAdmin admin = null;
- Table table = null;
- try {
- admin = TEST_UTIL.getHBaseAdmin();
- admin.createTable(htd);
- table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn);
- byte[] value = generateMobValue(10);
-
- byte[] row = Bytes.toBytes("row");
- Put put = new Put(row);
- put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
- table.put(put);
-
- admin.flush(tn);
- table.close();
+ TableName tn = TableName.valueOf("testDeleteNonMobTable");
+ HTableDescriptor htd = createTableDescriptor(tn, false);
+ HColumnDescriptor hcd = htd.getFamily(FAMILY);
+ Table table = createTableWithOneFile(htd);
+ try {
// the mob file doesn't exist
Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
Assert.assertFalse(mobTableDirExist(tn));
-
- admin.disableTable(tn);
- admin.deleteTable(tn);
-
- Assert.assertFalse(admin.tableExists(tn));
- Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
- Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
- Assert.assertFalse(mobTableDirExist(tn));
} finally {
- if (admin != null) {
- admin.close();
- }
+ table.close();
+ TEST_UTIL.deleteTable(tn);
}
+
+ Assert.assertFalse(TEST_UTIL.getHBaseAdmin().tableExists(tn));
+ Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertFalse(mobTableDirExist(tn));
}
@Test
public void testMobFamilyDelete() throws Exception {
TableName tn = TableName.valueOf("testMobFamilyDelete");
- HTableDescriptor htd = new HTableDescriptor(tn);
- HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
- hcd.setMobEnabled(true);
- hcd.setMobThreshold(0);
- htd.addFamily(hcd);
+ HTableDescriptor htd = createTableDescriptor(tn, true);
+ HColumnDescriptor hcd = htd.getFamily(FAMILY);
htd.addFamily(new HColumnDescriptor(Bytes.toBytes("family2")));
- HBaseAdmin admin = null;
- Table table = null;
+
+ Table table = createTableWithOneFile(htd);
try {
- admin = TEST_UTIL.getHBaseAdmin();
- admin.createTable(htd);
- table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn);
- byte[] value = generateMobValue(10);
- byte[] row = Bytes.toBytes("row");
- Put put = new Put(row);
- put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
- table.put(put);
- admin.flush(tn);
// the mob file exists
Assert.assertEquals(1, countMobFiles(tn, hcd.getNameAsString()));
Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
String fileName = assertHasOneMobRow(table, tn, hcd.getNameAsString());
Assert.assertFalse(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
Assert.assertTrue(mobTableDirExist(tn));
- admin.deleteColumnFamily(tn, FAMILY);
+
+ TEST_UTIL.getHBaseAdmin().deleteColumnFamily(tn, FAMILY);
+
Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
Assert.assertEquals(1, countArchiveMobFiles(tn, hcd.getNameAsString()));
Assert.assertTrue(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
- Assert.assertFalse(mobColumnFamilyDirExist(tn));
+ Assert.assertFalse(mobColumnFamilyDirExist(tn, hcd.getNameAsString()));
} finally {
table.close();
- if (admin != null) {
- admin.close();
- }
TEST_UTIL.deleteTable(tn);
}
}
@@ -209,9 +186,8 @@ public class TestDeleteMobTable {
Path mobFileDir = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName);
if (fs.exists(mobFileDir)) {
return fs.listStatus(mobFileDir).length;
- } else {
- return 0;
}
+ return 0;
}
private int countArchiveMobFiles(TableName tn, String familyName)
@@ -221,9 +197,8 @@ public class TestDeleteMobTable {
MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName);
if (fs.exists(storePath)) {
return fs.listStatus(storePath).length;
- } else {
- return 0;
}
+ return 0;
}
private boolean mobTableDirExist(TableName tn) throws IOException {
@@ -232,12 +207,9 @@ public class TestDeleteMobTable {
return fs.exists(tableDir);
}
- private boolean mobColumnFamilyDirExist(TableName tn) throws IOException {
+ private boolean mobColumnFamilyDirExist(TableName tn, String familyName) throws IOException {
FileSystem fs = TEST_UTIL.getTestFileSystem();
- Path tableDir = FSUtils.getTableDir(MobUtils.getMobHome(TEST_UTIL.getConfiguration()), tn);
- HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tn);
- Path mobFamilyDir = new Path(tableDir, new Path(mobRegionInfo.getEncodedName(),
- Bytes.toString(FAMILY)));
+ Path mobFamilyDir = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName);
return fs.exists(mobFamilyDir);
}
@@ -256,8 +228,7 @@ public class TestDeleteMobTable {
ResultScanner rs = table.getScanner(scan);
Result r = rs.next();
Assert.assertNotNull(r);
- byte[] value = r.getValue(FAMILY, QF);
- String fileName = Bytes.toString(value, Bytes.SIZEOF_INT, value.length - Bytes.SIZEOF_INT);
+ String fileName = MobUtils.getMobFileName(r.getColumnLatestCell(FAMILY, QF));
Path filePath = new Path(
MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName), fileName);
FileSystem fs = TEST_UTIL.getTestFileSystem();
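
The new createTableWithOneFile helper uses the acquire-or-release idiom: if populating the table throws, the Table handle is closed before the exception propagates, so callers need only one try/finally for the success path. A self-contained sketch of the idiom (Resource, populate, and acquire are illustrative stand-ins, not HBase API):

import java.io.IOException;

final class AcquireOrReleaseSketch {
  interface Resource extends AutoCloseable {
    void populate() throws IOException;
    @Override void close() throws IOException;
  }

  // Returns the resource fully initialized, or closes it and rethrows.
  static Resource acquire(Resource r) throws IOException {
    try {
      r.populate();   // may throw midway through setup
    } catch (IOException e) {
      r.close();      // never leak a half-initialized handle
      throw e;
    }
    return r;         // caller now owns the resource and must close it
  }
}
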
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
index 7cac301..f9ffc88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
@@ -278,8 +278,7 @@ public class TestMobStoreCompaction {
}
private int countMobFiles() throws IOException {
- Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()),
- hcd.getNameAsString());
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, htd.getTableName(), hcd.getNameAsString());
if (fs.exists(mobDirPath)) {
FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
return files.length;
@@ -289,8 +288,7 @@ public class TestMobStoreCompaction {
private long countMobCellsInMetadata() throws IOException {
long mobCellsCount = 0;
- Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()),
- hcd.getNameAsString());
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, htd.getTableName(), hcd.getNameAsString());
Configuration copyOfConf = new Configuration(conf);
copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
CacheConfig cacheConfig = new CacheConfig(copyOfConf);
@@ -357,9 +355,16 @@ public class TestMobStoreCompaction {
private int countRows() throws IOException {
Scan scan = new Scan();
- // Do not retrieve the mob data when scanning
InternalScanner scanner = region.getScanner(scan);
+ try {
+ return countRows(scanner);
+ } finally {
+ scanner.close();
+ }
+ }
+ private int countRows(InternalScanner scanner) throws IOException {
+ // Do not retrieve the mob data when scanning
int scannedCount = 0;
List<Cell> results = new ArrayList<Cell>();
boolean hasMore = true;
@@ -368,8 +373,6 @@ public class TestMobStoreCompaction {
scannedCount += results.size();
results.clear();
}
- scanner.close();
-
return scannedCount;
}
@@ -423,8 +426,7 @@ public class TestMobStoreCompaction {
Configuration copyOfConf = new Configuration(conf);
copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
CacheConfig cacheConfig = new CacheConfig(copyOfConf);
- Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()),
- hcd.getNameAsString());
+ Path mobDirPath = MobUtils.getMobFamilyPath(conf, htd.getTableName(), hcd.getNameAsString());
List<StoreFile> sfs = new ArrayList<>();
int numDelfiles = 0;
int size = 0;
@@ -436,6 +438,7 @@ public class TestMobStoreCompaction {
numDelfiles++;
}
}
+
List scanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false,
HConstants.LATEST_TIMESTAMP);
Scan scan = new Scan();
@@ -446,12 +449,10 @@ public class TestMobStoreCompaction {
CellComparator.COMPARATOR);
StoreScanner scanner = new StoreScanner(scan, scanInfo, ScanType.COMPACT_DROP_DELETES, null,
scanners, 0L, HConstants.LATEST_TIMESTAMP);
- List<Cell> results = new ArrayList<>();
- boolean hasMore = true;
- while (hasMore) {
- hasMore = scanner.next(results);
- size += results.size();
- results.clear();
+ try {
+ size += countRows(scanner);
+ } finally {
+ scanner.close();
}
}
// assert the number of the existing del files
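
The extracted countRows(InternalScanner) is the standard drain loop for region-level scanners: next() fills a batch and reports whether more rows remain. A minimal sketch relying only on the InternalScanner contract shown above:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

final class ScannerDrainSketch {
  // Counts cells returned by the scanner; the caller closes the scanner.
  static int drain(InternalScanner scanner) throws IOException {
    int count = 0;
    List<Cell> batch = new ArrayList<Cell>();
    boolean hasMore = true;
    while (hasMore) {
      hasMore = scanner.next(batch); // false once the scanner is exhausted
      count += batch.size();
      batch.clear();                 // reuse the buffer between batches
    }
    return count;
  }
}
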
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
index 8250b47..f3af63a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.io.hfile.TestHFile;
import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobTestUtil;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -196,12 +197,12 @@ public class TestMobStoreScanner {
table.put(put4);
Result result = rs.next();
Cell cell = result.getColumnLatestCell(family, qf1);
- Assert.assertEquals("value1", Bytes.toString(CellUtil.cloneValue(cell)));
+ Assert.assertArrayEquals(value1, CellUtil.cloneValue(cell));
admin.flush(tn);
result = rs.next();
cell = result.getColumnLatestCell(family, qf1);
- Assert.assertEquals("value2", Bytes.toString(CellUtil.cloneValue(cell)));
+ Assert.assertArrayEquals(value2, CellUtil.cloneValue(cell));
}
@Test
@@ -213,7 +214,7 @@ public class TestMobStoreScanner {
get.setAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS, Bytes.toBytes(true));
Result result = table.get(get);
Cell cell = result.getColumnLatestCell(family, qf1);
- Assert.assertEquals(0, CellUtil.cloneValue(cell).length);
+ Assert.assertEquals(0, cell.getValueLength());
}
@Test
@@ -249,8 +250,7 @@ public class TestMobStoreScanner {
private Path getFlushedMobFile(Configuration conf, FileSystem fs, TableName table, String family)
throws IOException {
- Path regionDir = MobUtils.getMobRegionPath(conf, table);
- Path famDir = new Path(regionDir, family);
+ Path famDir = MobUtils.getMobFamilyPath(conf, table, family);
FileStatus[] hfFss = fs.listStatus(famDir);
for (FileStatus hfs : hfFss) {
if (!hfs.isDirectory()) {
@@ -262,44 +262,21 @@ public class TestMobStoreScanner {
private void testGetFromFiles(boolean reversed) throws Exception {
TableName tn = TableName.valueOf("testGetFromFiles" + reversed);
- setUp(defaultThreshold, tn);
- long ts1 = System.currentTimeMillis();
- long ts2 = ts1 + 1;
- long ts3 = ts1 + 2;
- byte [] value = generateMobValue((int)defaultThreshold+1);
-
- Put put1 = new Put(row1);
- put1.addColumn(family, qf1, ts3, value);
- put1.addColumn(family, qf2, ts2, value);
- put1.addColumn(family, qf3, ts1, value);
- table.put(put1);
-
- admin.flush(tn);
-
- Scan scan = new Scan();
- setScan(scan, reversed, false);
-
- ResultScanner results = table.getScanner(scan);
- int count = 0;
- for (Result res : results) {
- List<Cell> cells = res.listCells();
- for(Cell cell : cells) {
- // Verify the value
- Assert.assertEquals(Bytes.toString(value),
- Bytes.toString(CellUtil.cloneValue(cell)));
- count++;
- }
- }
- results.close();
- Assert.assertEquals(3, count);
+ testGet(tn, reversed, true);
}
private void testGetFromMemStore(boolean reversed) throws Exception {
- setUp(defaultThreshold, TableName.valueOf("testGetFromMemStore" + reversed));
+ TableName tn = TableName.valueOf("testGetFromMemStore" + reversed);
+ testGet(tn, reversed, false);
+ }
+
+ private void testGet(TableName tableName, boolean reversed, boolean doFlush)
+ throws Exception {
+ setUp(defaultThreshold, tableName);
long ts1 = System.currentTimeMillis();
long ts2 = ts1 + 1;
long ts3 = ts1 + 2;
- byte [] value = generateMobValue((int)defaultThreshold+1);;
+ byte [] value = generateMobValue((int)defaultThreshold+1);
Put put1 = new Put(row1);
put1.addColumn(family, qf1, ts3, value);
@@ -307,22 +284,13 @@ public class TestMobStoreScanner {
put1.addColumn(family, qf3, ts1, value);
table.put(put1);
+ if (doFlush) {
+ admin.flush(tableName);
+ }
+
Scan scan = new Scan();
setScan(scan, reversed, false);
-
- ResultScanner results = table.getScanner(scan);
- int count = 0;
- for (Result res : results) {
- List<Cell> cells = res.listCells();
- for(Cell cell : cells) {
- // Verify the value
- Assert.assertEquals(Bytes.toString(value),
- Bytes.toString(CellUtil.cloneValue(cell)));
- count++;
- }
- }
- results.close();
- Assert.assertEquals(3, count);
+ MobTestUtil.assertCellsValue(table, scan, value, 3);
}
private void testGetReferences(boolean reversed) throws Exception {
@@ -426,8 +394,8 @@ public class TestMobStoreScanner {
// Get the files in the mob path
Path mobFamilyPath;
- mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(), tn),
- hcd.getNameAsString());
+ mobFamilyPath = MobUtils.getMobFamilyPath(
+ TEST_UTIL.getConfiguration(), tn, hcd.getNameAsString());
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
FileStatus[] files = fs.listStatus(mobFamilyPath);
@@ -458,19 +426,7 @@ public class TestMobStoreScanner {
// Scan from archive
Scan scan = new Scan();
setScan(scan, reversed, false);
- ResultScanner results = table.getScanner(scan);
- int count = 0;
- for (Result res : results) {
- List<Cell> cells = res.listCells();
- for(Cell cell : cells) {
- // Verify the value
- Assert.assertEquals(Bytes.toString(value),
- Bytes.toString(CellUtil.cloneValue(cell)));
- count++;
- }
- }
- results.close();
- Assert.assertEquals(3, count);
+ MobTestUtil.assertCellsValue(table, scan, value, 3);
}
/**
@@ -478,12 +434,9 @@ public class TestMobStoreScanner {
*/
private static void assertNotMobReference(Cell cell, byte[] row, byte[] family,
byte[] value) throws IOException {
- Assert.assertEquals(Bytes.toString(row),
- Bytes.toString(CellUtil.cloneRow(cell)));
- Assert.assertEquals(Bytes.toString(family),
- Bytes.toString(CellUtil.cloneFamily(cell)));
- Assert.assertTrue(Bytes.toString(value).equals(
- Bytes.toString(CellUtil.cloneValue(cell))));
+ Assert.assertArrayEquals(row, CellUtil.cloneRow(cell));
+ Assert.assertArrayEquals(family, CellUtil.cloneFamily(cell));
+ Assert.assertArrayEquals(value, CellUtil.cloneValue(cell));
}
/**
@@ -491,20 +444,15 @@ public class TestMobStoreScanner {
*/
private static void assertIsMobReference(Cell cell, byte[] row, byte[] family,
byte[] value, TableName tn) throws IOException {
- Assert.assertEquals(Bytes.toString(row),
- Bytes.toString(CellUtil.cloneRow(cell)));
- Assert.assertEquals(Bytes.toString(family),
- Bytes.toString(CellUtil.cloneFamily(cell)));
- Assert.assertFalse(Bytes.toString(value).equals(
- Bytes.toString(CellUtil.cloneValue(cell))));
+ Assert.assertArrayEquals(row, CellUtil.cloneRow(cell));
+ Assert.assertArrayEquals(family, CellUtil.cloneFamily(cell));
+ Assert.assertFalse(Bytes.equals(value, CellUtil.cloneValue(cell)));
byte[] referenceValue = CellUtil.cloneValue(cell);
- String fileName = Bytes.toString(referenceValue, Bytes.SIZEOF_INT,
- referenceValue.length - Bytes.SIZEOF_INT);
+ String fileName = MobUtils.getMobFileName(cell);
int valLen = Bytes.toInt(referenceValue, 0, Bytes.SIZEOF_INT);
Assert.assertEquals(value.length, valLen);
- Path mobFamilyPath;
- mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(),
- tn), hcd.getNameAsString());
+ Path mobFamilyPath = MobUtils.getMobFamilyPath(
+ TEST_UTIL.getConfiguration(), tn, hcd.getNameAsString());
Path targetPath = new Path(mobFamilyPath, fileName);
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
Assert.assertTrue(fs.exists(targetPath));
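
Several open-coded scan-and-compare loops above are replaced by MobTestUtil.assertCellsValue(table, scan, value, 3). The actual helper lives in MobTestUtil; this is a sketch of what it plausibly does, reconstructed from the loops it replaces:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.junit.Assert;

final class AssertCellsValueSketch {
  // Asserts every cell returned by the scan carries expectedValue, and that
  // exactly expectedCount cells come back in total.
  static void assertCellsValue(Table table, Scan scan, byte[] expectedValue,
      int expectedCount) throws IOException {
    int count = 0;
    try (ResultScanner results = table.getScanner(scan)) {
      for (Result res : results) {
        for (Cell cell : res.listCells()) {
          Assert.assertArrayEquals(expectedValue, CellUtil.cloneValue(cell));
          count++;
        }
      }
    }
    Assert.assertEquals(expectedCount, count);
  }
}
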
http://git-wip-us.apache.org/repos/asf/hbase/blob/c91bfff5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index f90a8c9..7459a7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -131,7 +131,7 @@ public class BaseTestHBaseFsck {
Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"),
Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") };
-
+
/**
* Create a new region in META.
*/
@@ -633,8 +633,7 @@ public class BaseTestHBaseFsck {
* @throws IOException
*/
Path getFlushedMobFile(FileSystem fs, TableName table) throws IOException {
- Path regionDir = MobUtils.getMobRegionPath(conf, table);
- Path famDir = new Path(regionDir, FAM_STR);
+ Path famDir = MobUtils.getMobFamilyPath(conf, table, FAM_STR);
// keep doing this until we get a legit hfile
while (true) {
[02/17] hbase git commit: HBASE-14698 Set category timeouts on
TestScanner and TestNamespaceAuditor
Posted by sy...@apache.org.
HBASE-14698 Set category timeouts on TestScanner and TestNamespaceAuditor
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f6ec611
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f6ec611
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f6ec611
Branch: refs/heads/hbase-12439
Commit: 0f6ec611e653eec03cd3d2c516cbfcb202090916
Parents: 4c04e80
Author: stack <st...@apache.org>
Authored: Mon Oct 26 12:55:36 2015 -0700
Committer: stack <st...@apache.org>
Committed: Mon Oct 26 13:05:58 2015 -0700
----------------------------------------------------------------------
.../hbase/io/encoding/TestDataBlockEncoders.java | 3 ---
.../hbase/namespace/TestNamespaceAuditor.java | 19 ++++++++++++-------
.../hadoop/hbase/regionserver/TestScanner.java | 5 +++++
3 files changed, 17 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0f6ec611/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index 2e9e973..9863fac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -32,9 +32,6 @@ import java.util.List;
import java.util.Random;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
http://git-wip-us.apache.org/repos/asf/hbase/blob/0f6ec611/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index 9912808..41a9713 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -37,6 +37,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -85,13 +86,17 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
+import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
import com.google.common.collect.Sets;
@Category(MediumTests.class)
public class TestNamespaceAuditor {
+ @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+ withTimeout(this.getClass()).withLookingForStuckThread(true).build();
private static final Log LOG = LogFactory.getLog(TestNamespaceAuditor.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static HBaseAdmin ADMIN;
@@ -133,7 +138,7 @@ public class TestNamespaceAuditor {
.getMasterQuotaManager().isQuotaEnabled());
}
- @Test(timeout = 60000)
+ @Test
public void testTableOperations() throws Exception {
String nsp = prefix + "_np2";
NamespaceDescriptor nspDesc =
@@ -480,7 +485,7 @@ public class TestNamespaceAuditor {
* namespace quota cache. Now correct the failure and recreate the table with same name.
* HBASE-13394
*/
- @Test(timeout = 180000)
+ @Test
public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exception {
String nsp1 = prefix + "_testRecreateTable";
NamespaceDescriptor nspDesc =
@@ -659,7 +664,7 @@ public class TestNamespaceAuditor {
observer.tableDeletionLatch.await();
}
- @Test(expected = QuotaExceededException.class, timeout = 30000)
+ @Test(expected = QuotaExceededException.class)
public void testExceedTableQuotaInNamespace() throws Exception {
String nsp = prefix + "_testExceedTableQuotaInNamespace";
NamespaceDescriptor nspDesc =
@@ -676,7 +681,7 @@ public class TestNamespaceAuditor {
ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
}
- @Test(expected = QuotaExceededException.class, timeout = 30000)
+ @Test(expected = QuotaExceededException.class)
public void testCloneSnapshotQuotaExceed() throws Exception {
String nsp = prefix + "_testTableQuotaExceedWithCloneSnapshot";
NamespaceDescriptor nspDesc =
@@ -694,7 +699,7 @@ public class TestNamespaceAuditor {
ADMIN.deleteSnapshot(snapshot);
}
- @Test(timeout = 180000)
+ @Test
public void testCloneSnapshot() throws Exception {
String nsp = prefix + "_testCloneSnapshot";
NamespaceDescriptor nspDesc =
@@ -729,7 +734,7 @@ public class TestNamespaceAuditor {
ADMIN.deleteSnapshot(snapshot);
}
- @Test(timeout = 180000)
+ @Test
public void testRestoreSnapshot() throws Exception {
String nsp = prefix + "_testRestoreSnapshot";
NamespaceDescriptor nspDesc =
@@ -763,7 +768,7 @@ public class TestNamespaceAuditor {
ADMIN.deleteSnapshot(snapshot);
}
- @Test(timeout = 180000)
+ @Test
public void testRestoreSnapshotQuotaExceed() throws Exception {
String nsp = prefix + "_testRestoreSnapshotQuotaExceed";
NamespaceDescriptor nspDesc =
http://git-wip-us.apache.org/repos/asf/hbase/blob/0f6ec611/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index ad71bc7..8f0cd4c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -33,6 +33,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestCase;
@@ -61,6 +62,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
/**
* Test of a long-lived scanner validating as we go.
@@ -68,6 +70,9 @@ import org.junit.rules.TestName;
@Category({RegionServerTests.class, SmallTests.class})
public class TestScanner {
@Rule public TestName name = new TestName();
+ @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+ withTimeout(this.getClass()).withLookingForStuckThread(true).build();
+
private static final Log LOG = LogFactory.getLog(TestScanner.class);
private final static HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
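
The pattern in this commit trades fixed @Test(timeout = ...) values for one class-level rule whose limit derives from the test's size category. A minimal sketch of adopting the rule, using only pieces that appear in the diff:

import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;

public class TimeoutRuleSketch {
  // One rule covers every @Test method; the timeout comes from the class's
  // test category instead of being hard-coded per method.
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder()
      .withTimeout(this.getClass()).withLookingForStuckThread(true).build();

  @Test // no per-method timeout needed any more
  public void example() throws Exception {
    // test body elided
  }
}
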
[08/17] hbase git commit: HBASE-14678 Experiment: Temporarily disable
balancer and a few others to see if root of crashed/timedout JVMs; ADDENDUM
Posted by sy...@apache.org.
HBASE-14678 Experiment: Temporarily disable balancer and a few others to see if root of crashed/timedout JVMs; ADDENDUM
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/496d20cf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/496d20cf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/496d20cf
Branch: refs/heads/hbase-12439
Commit: 496d20cfca5a30bc72a29e4ef893424964f9fa91
Parents: 2b86002
Author: stack <st...@apache.org>
Authored: Mon Oct 26 20:20:47 2015 -0700
Committer: stack <st...@apache.org>
Committed: Mon Oct 26 20:21:01 2015 -0700
----------------------------------------------------------------------
.../TestMobFlushSnapshotFromClient.java | 72 --------------------
1 file changed, 72 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/496d20cf/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
deleted file mode 100644
index 69f2cb3..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test creating/using/deleting snapshots from the client
- * <p>
- * This is an end-to-end test for the snapshot utility
- *
- * TODO This is essentially a clone of TestSnapshotFromClient. This is worth refactoring this
- * because there will be a few more flavors of snapshots that need to run these tests.
- */
-@Category({ClientTests.class, LargeTests.class})
-public class TestMobFlushSnapshotFromClient extends TestFlushSnapshotFromClient {
- private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class);
-
- @BeforeClass
- public static void setupCluster() throws Exception {
- setupConf(UTIL.getConfiguration());
- UTIL.startMiniCluster(3);
- }
-
- protected static void setupConf(Configuration conf) {
- TestFlushSnapshotFromClient.setupConf(conf);
- UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
- }
-
- @Override
- protected void createTable() throws Exception {
- MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
- }
-
- @Override
- protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
- long expectedRows) throws IOException {
- MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
- }
-
- @Override
- protected int countRows(final Table table, final byte[]... families) throws IOException {
- return MobSnapshotTestingUtils.countMobRows(table, families);
- }
-}
[12/17] hbase git commit: HBASE-14696 Support setting
allowPartialResults in mapreduce Mappers
Posted by sy...@apache.org.
HBASE-14696 Support setting allowPartialResults in mapreduce Mappers
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f91546f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f91546f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f91546f2
Branch: refs/heads/hbase-12439
Commit: f91546f2e69a0452074d781c8df5723bab1fa393
Parents: a321da2
Author: tedyu <yu...@gmail.com>
Authored: Tue Oct 27 07:57:32 2015 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue Oct 27 07:57:32 2015 -0700
----------------------------------------------------------------------
.../hadoop/hbase/protobuf/ProtobufUtil.java | 6 +
.../hbase/protobuf/generated/ClientProtos.java | 233 +++++++++++++------
hbase-protocol/src/main/protobuf/Client.proto | 1 +
3 files changed, 169 insertions(+), 71 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/f91546f2/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 4426a15..15c5675 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -868,6 +868,9 @@ public final class ProtobufUtil {
if (scan.isSmall()) {
scanBuilder.setSmall(scan.isSmall());
}
+ if (scan.getAllowPartialResults()) {
+ scanBuilder.setAllowPartialResults(scan.getAllowPartialResults());
+ }
Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
if (loadColumnFamiliesOnDemand != null) {
scanBuilder.setLoadColumnFamiliesOnDemand(loadColumnFamiliesOnDemand.booleanValue());
@@ -992,6 +995,9 @@ public final class ProtobufUtil {
if (proto.hasSmall()) {
scan.setSmall(proto.getSmall());
}
+ if (proto.hasAllowPartialResults()) {
+ scan.setAllowPartialResults(proto.getAllowPartialResults());
+ }
for (NameBytesPair attribute: proto.getAttributeList()) {
scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
}
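
Both conversion directions guard the new field the same way: it is only copied when explicitly set, so scans from older clients round-trip unchanged. A small usage sketch built on the two ProtobufUtil methods touched here:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

final class ScanRoundTripSketch {
  static void demo() throws IOException {
    Scan scan = new Scan();
    scan.setAllowPartialResults(true);                   // client-side setting
    ClientProtos.Scan proto = ProtobufUtil.toScan(scan); // serialize for the RPC
    Scan back = ProtobufUtil.toScan(proto);              // rebuild on the server
    assert back.getAllowPartialResults();                // flag survives the round trip
  }
}
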
http://git-wip-us.apache.org/repos/asf/hbase/blob/f91546f2/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index c4b1eec..9c7ff54 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -13682,6 +13682,16 @@ public final class ClientProtos {
* <code>optional uint32 caching = 17;</code>
*/
int getCaching();
+
+ // optional bool allow_partial_results = 18;
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ boolean hasAllowPartialResults();
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ boolean getAllowPartialResults();
}
/**
* Protobuf type {@code hbase.pb.Scan}
@@ -13858,6 +13868,11 @@ public final class ClientProtos {
caching_ = input.readUInt32();
break;
}
+ case 144: {
+ bitField0_ |= 0x00008000;
+ allowPartialResults_ = input.readBool();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -14236,6 +14251,22 @@ public final class ClientProtos {
return caching_;
}
+ // optional bool allow_partial_results = 18;
+ public static final int ALLOW_PARTIAL_RESULTS_FIELD_NUMBER = 18;
+ private boolean allowPartialResults_;
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ public boolean hasAllowPartialResults() {
+ return ((bitField0_ & 0x00008000) == 0x00008000);
+ }
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ public boolean getAllowPartialResults() {
+ return allowPartialResults_;
+ }
+
private void initFields() {
column_ = java.util.Collections.emptyList();
attribute_ = java.util.Collections.emptyList();
@@ -14254,6 +14285,7 @@ public final class ClientProtos {
reversed_ = false;
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
caching_ = 0;
+ allowPartialResults_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -14336,6 +14368,9 @@ public final class ClientProtos {
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeUInt32(17, caching_);
}
+ if (((bitField0_ & 0x00008000) == 0x00008000)) {
+ output.writeBool(18, allowPartialResults_);
+ }
getUnknownFields().writeTo(output);
}
@@ -14413,6 +14448,10 @@ public final class ClientProtos {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(17, caching_);
}
+ if (((bitField0_ & 0x00008000) == 0x00008000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(18, allowPartialResults_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -14515,6 +14554,11 @@ public final class ClientProtos {
result = result && (getCaching()
== other.getCaching());
}
+ result = result && (hasAllowPartialResults() == other.hasAllowPartialResults());
+ if (hasAllowPartialResults()) {
+ result = result && (getAllowPartialResults()
+ == other.getAllowPartialResults());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -14596,6 +14640,10 @@ public final class ClientProtos {
hash = (37 * hash) + CACHING_FIELD_NUMBER;
hash = (53 * hash) + getCaching();
}
+ if (hasAllowPartialResults()) {
+ hash = (37 * hash) + ALLOW_PARTIAL_RESULTS_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getAllowPartialResults());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -14770,6 +14818,8 @@ public final class ClientProtos {
bitField0_ = (bitField0_ & ~0x00008000);
caching_ = 0;
bitField0_ = (bitField0_ & ~0x00010000);
+ allowPartialResults_ = false;
+ bitField0_ = (bitField0_ & ~0x00020000);
return this;
}
@@ -14884,6 +14934,10 @@ public final class ClientProtos {
to_bitField0_ |= 0x00004000;
}
result.caching_ = caching_;
+ if (((from_bitField0_ & 0x00020000) == 0x00020000)) {
+ to_bitField0_ |= 0x00008000;
+ }
+ result.allowPartialResults_ = allowPartialResults_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -14997,6 +15051,9 @@ public final class ClientProtos {
if (other.hasCaching()) {
setCaching(other.getCaching());
}
+ if (other.hasAllowPartialResults()) {
+ setAllowPartialResults(other.getAllowPartialResults());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -16210,6 +16267,39 @@ public final class ClientProtos {
return this;
}
+ // optional bool allow_partial_results = 18;
+ private boolean allowPartialResults_ ;
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ public boolean hasAllowPartialResults() {
+ return ((bitField0_ & 0x00020000) == 0x00020000);
+ }
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ public boolean getAllowPartialResults() {
+ return allowPartialResults_;
+ }
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ public Builder setAllowPartialResults(boolean value) {
+ bitField0_ |= 0x00020000;
+ allowPartialResults_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool allow_partial_results = 18;</code>
+ */
+ public Builder clearAllowPartialResults() {
+ bitField0_ = (bitField0_ & ~0x00020000);
+ allowPartialResults_ = false;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.Scan)
}
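
The generated accessors follow protobuf 2.x's presence-bit convention: every optional field owns one bit in bitField0_, set on write and tested by hasX(). A compact illustration of that convention, independent of the generated class (the 0x00020000 mask matches the builder above):

final class PresenceBitSketch {
  private int bitField0_;
  private boolean allowPartialResults_;
  private static final int ALLOW_PARTIAL_MASK = 0x00020000; // one bit per field

  boolean hasAllowPartialResults() {
    return (bitField0_ & ALLOW_PARTIAL_MASK) == ALLOW_PARTIAL_MASK;
  }

  void setAllowPartialResults(boolean value) {
    bitField0_ |= ALLOW_PARTIAL_MASK; // mark the field as explicitly set
    allowPartialResults_ = value;
  }
}
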
@@ -33168,7 +33258,7 @@ public final class ClientProtos {
"b.MutationProto\022&\n\tcondition\030\003 \001(\0132\023.hba",
"se.pb.Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016" +
"MutateResponse\022 \n\006result\030\001 \001(\0132\020.hbase.p" +
- "b.Result\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006" +
+ "b.Result\022\021\n\tprocessed\030\002 \001(\010\"\205\004\n\004Scan\022 \n\006" +
"column\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattrib" +
"ute\030\002 \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tst" +
"art_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filte" +
@@ -33180,75 +33270,76 @@ public final class ClientProtos {
"t\030\014 \001(\r\022&\n\036load_column_families_on_deman" +
"d\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010" +
":\005false\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb." +
- "Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n" +
- "\013ScanRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb." +
- "RegionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb" +
- ".Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_r" +
- "ows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext" +
- "_call_seq\030\006 \001(\004\022\037\n\027client_handles_partia",
- "ls\030\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 " +
- "\001(\010\022\032\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014Scan" +
- "Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" +
- "anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" +
- "ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re" +
- "sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" +
- "result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" +
- " \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" +
- "metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" +
- "\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.",
- "hbase.pb.RegionSpecifier\022>\n\013family_path\030" +
- "\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" +
- "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" +
- "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" +
- "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" +
- "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" +
- "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" +
- "\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" +
- "sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" +
- "Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg",
- "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" +
- "call\030\002 \002(\0132 .hbase.pb.CoprocessorService" +
- "Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" +
- "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" +
- "\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001" +
- "\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" +
- "\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" +
- "\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" +
- "base.pb.CoprocessorServiceCall\"k\n\014Region" +
- "Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region",
- "Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" +
- "\0132\020.hbase.pb.Action\"D\n\017RegionLoadStats\022\027" +
- "\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" +
- "\030\002 \001(\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index" +
- "\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result" +
- "\022*\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytes" +
- "Pair\022:\n\016service_result\030\004 \001(\0132\".hbase.pb." +
- "CoprocessorServiceResult\022,\n\tloadStats\030\005 " +
- "\001(\0132\031.hbase.pb.RegionLoadStats\"x\n\022Region" +
- "ActionResult\0226\n\021resultOrException\030\001 \003(\0132",
- "\033.hbase.pb.ResultOrException\022*\n\texceptio" +
- "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" +
- "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" +
- "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" +
- "ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMu" +
- "ltiResponse\0228\n\022regionActionResult\030\001 \003(\0132" +
- "\034.hbase.pb.RegionActionResult\022\021\n\tprocess" +
- "ed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010T" +
- "IMELINE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hba" +
- "se.pb.GetRequest\032\025.hbase.pb.GetResponse\022",
- ";\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030.hba" +
- "se.pb.MutateResponse\0225\n\004Scan\022\025.hbase.pb." +
- "ScanRequest\032\026.hbase.pb.ScanResponse\022P\n\rB" +
- "ulkLoadHFile\022\036.hbase.pb.BulkLoadHFileReq" +
- "uest\032\037.hbase.pb.BulkLoadHFileResponse\022X\n" +
- "\013ExecService\022#.hbase.pb.CoprocessorServi" +
- "ceRequest\032$.hbase.pb.CoprocessorServiceR" +
- "esponse\022d\n\027ExecRegionServerService\022#.hba" +
- "se.pb.CoprocessorServiceRequest\032$.hbase." +
- "pb.CoprocessorServiceResponse\0228\n\005Multi\022\026",
- ".hbase.pb.MultiRequest\032\027.hbase.pb.MultiR" +
- "esponseBB\n*org.apache.hadoop.hbase.proto" +
- "buf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
+ "Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\022\035\n\025" +
+ "allow_partial_results\030\022 \001(\010\"\220\002\n\013ScanRequ" +
+ "est\022)\n\006region\030\001 \001(\0132\031.hbase.pb.RegionSpe" +
+ "cifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\n" +
+ "scanner_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r" +
+ "\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_call_seq",
+ "\030\006 \001(\004\022\037\n\027client_handles_partials\030\007 \001(\010\022" +
+ "!\n\031client_handles_heartbeats\030\010 \001(\010\022\032\n\022tr" +
+ "ack_scan_metrics\030\t \001(\010\"\232\002\n\014ScanResponse\022" +
+ "\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_id\030" +
+ "\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r" +
+ "\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n\005s" +
+ "tale\030\006 \001(\010\022\037\n\027partial_flag_per_result\030\007 " +
+ "\003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n\021h" +
+ "eartbeat_message\030\t \001(\010\022+\n\014scan_metrics\030\n" +
+ " \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n\024BulkLoad",
+ "HFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
+ "RegionSpecifier\022>\n\013family_path\030\002 \003(\0132).h" +
+ "base.pb.BulkLoadHFileRequest.FamilyPath\022" +
+ "\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n" +
+ "\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadH" +
+ "FileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Coproces" +
+ "sorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_n" +
+ "ame\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007reques" +
+ "t\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n\005v" +
+ "alue\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n\031C",
+ "oprocessorServiceRequest\022)\n\006region\030\001 \002(\013" +
+ "2\031.hbase.pb.RegionSpecifier\022.\n\004call\030\002 \002(" +
+ "\0132 .hbase.pb.CoprocessorServiceCall\"o\n\032C" +
+ "oprocessorServiceResponse\022)\n\006region\030\001 \002(" +
+ "\0132\031.hbase.pb.RegionSpecifier\022&\n\005value\030\002 " +
+ "\002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Action\022" +
+ "\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase" +
+ ".pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase.p" +
+ "b.Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb.C" +
+ "oprocessorServiceCall\"k\n\014RegionAction\022)\n",
+ "\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier" +
+ "\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase" +
+ ".pb.Action\"D\n\017RegionLoadStats\022\027\n\014memstor" +
+ "eLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010" +
+ "\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n" +
+ "\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcep" +
+ "tion\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016s" +
+ "ervice_result\030\004 \001(\0132\".hbase.pb.Coprocess" +
+ "orServiceResult\022,\n\tloadStats\030\005 \001(\0132\031.hba" +
+ "se.pb.RegionLoadStats\"x\n\022RegionActionRes",
+ "ult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase.p" +
+ "b.ResultOrException\022*\n\texception\030\002 \001(\0132\027" +
+ ".hbase.pb.NameBytesPair\"x\n\014MultiRequest\022" +
+ ",\n\014regionAction\030\001 \003(\0132\026.hbase.pb.RegionA" +
+ "ction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003" +
+ " \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRespon" +
+ "se\0228\n\022regionActionResult\030\001 \003(\0132\034.hbase.p" +
+ "b.RegionActionResult\022\021\n\tprocessed\030\002 \001(\010*" +
+ "\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" +
+ "2\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get",
+ "Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" +
+ "\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" +
+ "ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque" +
+ "st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" +
+ "ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb" +
+ "ase.pb.BulkLoadHFileResponse\022X\n\013ExecServ" +
+ "ice\022#.hbase.pb.CoprocessorServiceRequest" +
+ "\032$.hbase.pb.CoprocessorServiceResponse\022d" +
+ "\n\027ExecRegionServerService\022#.hbase.pb.Cop" +
+ "rocessorServiceRequest\032$.hbase.pb.Coproc",
+ "essorServiceResponse\0228\n\005Multi\022\026.hbase.pb" +
+ ".MultiRequest\032\027.hbase.pb.MultiResponseBB" +
+ "\n*org.apache.hadoop.hbase.protobuf.gener" +
+ "atedB\014ClientProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -33338,7 +33429,7 @@ public final class ClientProtos {
internal_static_hbase_pb_Scan_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_Scan_descriptor,
- new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", });
+ new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", });
internal_static_hbase_pb_ScanRequest_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new
http://git-wip-us.apache.org/repos/asf/hbase/blob/f91546f2/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index 101854d..e33f9f2 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -251,6 +251,7 @@ message Scan {
optional bool reversed = 15 [default = false];
optional Consistency consistency = 16 [default = STRONG];
optional uint32 caching = 17;
+ optional bool allow_partial_results = 18;
}
/**
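
Per the commit subject, the point of the new field is to let mapreduce mappers opt into partial results. A client-side usage sketch, assuming the usual TableMapReduceUtil job setup (the caching value is illustrative):

import org.apache.hadoop.hbase.client.Scan;

final class PartialResultsScanSketch {
  // A mapper Scan that may return one wide row split across several Results,
  // bounding per-RPC memory; hand the Scan to TableMapReduceUtil as usual.
  static Scan build() {
    Scan scan = new Scan();
    scan.setAllowPartialResults(true); // flag wired through in this patch
    scan.setCaching(500);              // illustrative value
    return scan;
  }
}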