Posted to commits@hbase.apache.org by sy...@apache.org on 2017/03/27 20:08:59 UTC
[01/23] hbase git commit: HBASE-16084 Cleaned up the stale references in Javadoc
Repository: hbase
Updated Branches:
refs/heads/hbase-12439 4088f822a -> 4b62a52eb
HBASE-16084 Cleaned up the stale references in Javadoc
Signed-off-by: tedyu <yu...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55d6dcaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55d6dcaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55d6dcaf
Branch: refs/heads/hbase-12439
Commit: 55d6dcaf877cc5223e679736eb613173229c18be
Parents: 4088f82
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Mar 19 20:49:28 2017 +0100
Committer: tedyu <yu...@gmail.com>
Committed: Mon Mar 20 10:55:36 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/HTableDescriptor.java | 14 +++++++-------
.../apache/hadoop/hbase/client/AsyncProcess.java | 6 +++---
.../hadoop/hbase/client/ConnectionFactory.java | 2 +-
.../apache/hadoop/hbase/client/MasterCallable.java | 2 +-
.../java/org/apache/hadoop/hbase/client/Query.java | 2 +-
.../hbase/client/coprocessor/package-info.java | 8 ++++----
.../FirstKeyValueMatchingQualifiersFilter.java | 2 +-
.../hadoop/hbase/ipc/ServerRpcController.java | 4 ++--
.../hbase/zookeeper/RecoverableZooKeeper.java | 2 +-
.../java/org/apache/hadoop/hbase/nio/ByteBuff.java | 2 +-
.../org/apache/hadoop/hbase/util/OrderedBytes.java | 2 +-
.../hadoop/hbase/HBaseCommonTestingUtility.java | 3 +--
.../codec/prefixtree/scanner/CellSearcher.java | 2 +-
.../store/wal/ProcedureWALFormatReader.java | 3 +--
.../apache/hadoop/hbase/backup/HFileArchiver.java | 2 --
.../hbase/backup/example/HFileArchiveManager.java | 3 ++-
.../hadoop/hbase/backup/util/RestoreTool.java | 4 ++--
.../apache/hadoop/hbase/constraint/Constraint.java | 8 ++++----
.../hbase/io/hfile/CompoundBloomFilterWriter.java | 2 +-
.../apache/hadoop/hbase/io/hfile/HFileBlock.java | 2 +-
.../hadoop/hbase/io/hfile/HFileBlockIndex.java | 2 +-
.../apache/hadoop/hbase/mapreduce/RowCounter.java | 3 +--
.../hadoop/hbase/master/TableNamespaceManager.java | 2 +-
.../master/balancer/StochasticLoadBalancer.java | 4 ++--
.../regionserver/MiniBatchOperationInProgress.java | 4 ++--
.../hadoop/hbase/regionserver/StoreFileReader.java | 2 +-
.../hadoop/hbase/regionserver/wal/FSHLog.java | 8 ++++----
.../regionserver/wal/SequenceIdAccounting.java | 6 +++---
.../hadoop/hbase/regionserver/wal/SyncFuture.java | 4 ++--
.../access/CoprocessorWhitelistMasterObserver.java | 2 +-
.../hbase/security/access/TableAuthManager.java | 2 +-
.../apache/hadoop/hbase/HBaseTestingUtility.java | 6 +++---
.../apache/hadoop/hbase/TestMetaTableLocator.java | 8 ++++----
.../hbase/TestPartialResultsFromClientSide.java | 2 +-
.../org/apache/hadoop/hbase/TestSerialization.java | 4 ++--
.../hbase/client/TestMultipleTimestamps.java | 2 +-
.../TestFirstKeyValueMatchingQualifiersFilter.java | 2 +-
.../apache/hadoop/hbase/io/hfile/TestSeekTo.java | 2 +-
.../hbase/mapreduce/TestHFileOutputFormat2.java | 17 +++++++----------
.../hadoop/hbase/mapreduce/TestImportExport.java | 2 +-
.../hadoop/hbase/master/MockRegionServer.java | 4 ++--
.../hadoop/hbase/master/TestWarmupRegion.java | 2 +-
.../hbase/procedure/TestProcedureMember.java | 4 ++--
.../hbase/regionserver/DataBlockEncodingTool.java | 2 +-
.../hbase/regionserver/OOMERegionServer.java | 3 +--
.../TestRegionMergeTransactionOnCluster.java | 6 ------
.../org/apache/hadoop/hbase/util/LoadTestTool.java | 2 +-
.../hadoop/hbase/util/MultiThreadedAction.java | 4 ++--
48 files changed, 86 insertions(+), 100 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a49cf1c..25fd896 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -723,7 +723,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* This sets the class associated with the region split policy which
* determines when a region split should occur. The class used by
- * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
+ * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
* @param clazz the class name
*/
public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
@@ -734,7 +734,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* This gets the class associated with the region split policy which
* determines when a region split should occur. The class used by
- * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
+ * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
*
* @return the class name of the region split policy for this table.
* If this returns null, the default split policy is used.
@@ -827,7 +827,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* This sets the class associated with the flush policy which determines determines the stores
* need to be flushed when flushing a region. The class used by default is defined in
- * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
+ * org.apache.hadoop.hbase.regionserver.FlushPolicy.
* @param clazz the class name
*/
public HTableDescriptor setFlushPolicyClassName(String clazz) {
@@ -838,7 +838,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* This gets the class associated with the flush policy which determines the stores need to be
* flushed when flushing a region. The class used by default is defined in
- * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
+ * org.apache.hadoop.hbase.regionserver.FlushPolicy.
* @return the class name of the flush policy for this table. If this returns null, the default
* flush policy is used.
*/
@@ -1244,7 +1244,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* Add a table coprocessor to this table. The coprocessor
- * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
+ * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
* or Endpoint.
* It won't check if the class can be loaded or not.
* Whether a coprocessor is loadable or not will be determined when
@@ -1259,7 +1259,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* Add a table coprocessor to this table. The coprocessor
- * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
+ * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
* or Endpoint.
* It won't check if the class can be loaded or not.
* Whether a coprocessor is loadable or not will be determined when
@@ -1304,7 +1304,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* Add a table coprocessor to this table. The coprocessor
- * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
+ * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
* or Endpoint.
* It won't check if the class can be loaded or not.
* Whether a coprocessor is loadable or not will be determined when
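For context on the Javadoc edited above, a minimal usage sketch of these
HTableDescriptor setters (the table name and the observer class are
illustrative assumptions, not part of this commit):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;

    public class DescriptorSketch {
      static HTableDescriptor describe() throws IOException {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
        // The split policy class is resolved on the region server, not here.
        desc.setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
        // Must be a RegionObserver or an Endpoint; loadability is only
        // determined when the region containing it is opened.
        desc.addCoprocessor("org.example.MyRegionObserver");
        return desc;
      }
    }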
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index a65d327..ba6b052 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -452,7 +452,7 @@ class AsyncProcess {
/**
* Only used w/useGlobalErrors ctor argument, for HTable backward compat.
* @return Whether there were any errors in any request since the last time
- * {@link #waitForAllPreviousOpsAndReset(List, String)} was called, or AP was created.
+ * {@link #waitForAllPreviousOpsAndReset(List, TableName)} was called, or AP was created.
*/
public boolean hasError() {
return globalErrors != null && globalErrors.hasErrors();
@@ -463,9 +463,9 @@ class AsyncProcess {
* Waits for all previous operations to finish, and returns errors and (optionally)
* failed operations themselves.
* @param failedRows an optional list into which the rows that failed since the last time
- * {@link #waitForAllPreviousOpsAndReset(List, String)} was called, or AP was created, are saved.
+ * {@link #waitForAllPreviousOpsAndReset(List, TableName)} was called, or AP was created, are saved.
* @param tableName name of the table
- * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List, String)}
+ * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List, TableName)}
* was called, or AP was created.
*/
public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 7cbcc20..64f337a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
* A non-instantiable class that manages creation of {@link Connection}s. Managing the lifecycle of
* the {@link Connection}s to the cluster is the responsibility of the caller. From a
* {@link Connection}, {@link Table} implementations are retrieved with
- * {@link Connection#getTable(TableName)}. Example:
+ * {@link Connection#getTable(org.apache.hadoop.hbase.TableName)}. Example:
*
* <pre>
* Connection connection = ConnectionFactory.createConnection(config);
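The lifecycle note above is the crux of this Javadoc: the caller owns the
Connection. A short sketch using try-with-resources (table name is
illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionSketch {
      static void run() throws IOException {
        Configuration config = HBaseConfiguration.create();
        // Both the Connection and the Table are closed automatically.
        try (Connection connection = ConnectionFactory.createConnection(config);
             Table table = connection.getTable(TableName.valueOf("demo"))) {
          // use the table ...
        }
      }
    }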
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index 0b24bcd..ca4ae8f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -80,7 +80,7 @@ abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
}
/**
- * Override that changes the {@link Callable#call()} Exception from {@link Exception} to
+ * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from {@link Exception} to
* {@link IOException}. It also does setup of an rpcController and calls through to the rpcCall()
* method which callers are expected to implement. If rpcController is an instance of
* PayloadCarryingRpcController, we will set a timeout on it.
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 222eaff..1322ef5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -56,7 +56,7 @@ public abstract class Query extends OperationWithAttributes {
/**
* Apply the specified server-side filter when performing the Query.
- * Only {@link Filter#filterKeyValue(Cell)} is called AFTER all tests
+ * Only {@link Filter#filterKeyValue(org.apache.hadoop.hbase.Cell)} is called AFTER all tests
* for ttl, column match, deletes and max versions have been run.
* @param filter filter to run on the server
* @return this for invocation chaining
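A small sketch of the setFilter usage this Javadoc documents (PrefixFilter
and the row prefix are illustrative choices):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    // Scan is a Query subclass; the filter runs server-side, and its
    // filterKeyValue is consulted only after ttl/version/delete checks.
    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("row-")));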
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
index c70f27f..82f0c87 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
@@ -44,8 +44,8 @@ must:
for more details on defining services.</li>
<li>Generate the Service and Message code using the protoc compiler</li>
<li>Implement the generated Service interface in your coprocessor class and implement the
- {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} interface. The
- {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()}
+ org.apache.hadoop.hbase.coprocessor.CoprocessorService interface. The
+ org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()
method should return a reference to the Endpoint's protocol buffer Service instance.
</ul>
<p>
@@ -146,10 +146,10 @@ public static abstract class RowCountService
</pre></blockquote></div>
<p>
-Our coprocessor Service will need to implement this interface and the {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService}
+Our coprocessor Service will need to implement this interface and the org.apache.hadoop.hbase.coprocessor.CoprocessorService
in order to be registered correctly as an endpoint. For the sake of simplicity the server-side
implementation is omitted. To see the implementing code, please see the
-{@link org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint} class in the HBase source code.
+org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint class in the HBase source code.
</p>
<p>
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
index 6b202ad..4681fd3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
* Note : It may emit KVs which do not have the given columns in them, if
* these KVs happen to occur before a KV which does have a match. Given this
* caveat, this filter is only useful for special cases
- * like {@link org.apache.hadoop.hbase.mapreduce.RowCounter}.
+ * like org.apache.hadoop.hbase.mapreduce.RowCounter.
* <p>
* @deprecated Deprecated in 2.0. See HBASE-13347
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
index b899eb8..a333d57 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
@@ -54,8 +54,8 @@ import org.apache.hadoop.util.StringUtils;
public class ServerRpcController implements RpcController {
/**
* The exception thrown within
- * {@link com.google.protobuf.Service#callMethod(
- * Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)}
+ * {@link com.google.protobuf.Service#callMethod(com.google.protobuf.Descriptors.MethodDescriptor, RpcController,
+ * com.google.protobuf.Message, RpcCallback)}
* if any.
*/
// TODO: it would be good widen this to just Throwable, but IOException is what we allow now
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index 43a5ad9..4f07d5b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -630,7 +630,7 @@ public class RecoverableZooKeeper {
}
}
/**
- * Convert Iterable of {@link ZKOp} we got into the ZooKeeper.Op
+ * Convert Iterable of {@link org.apache.zookeeper.Op} we got into the ZooKeeper.Op
* instances to actually pass to multi (need to do this in order to appendMetaData).
*/
private Iterable<Op> prepareZKMulti(Iterable<Op> ops)
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index 60202a0..036c4e6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -492,7 +492,7 @@ public abstract class ByteBuff {
}
/**
- * Similar to {@link WritableUtils#readVLong(DataInput)} but reads from a
+ * Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a
* {@link ByteBuff}.
*/
public static long readVLong(ByteBuff in) {
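For reference, a hedged sketch of the vlong decoding this method mirrors,
following Hadoop's WritableUtils format and reading byte by byte from the
ByteBuff (an approximation, not the committed implementation):

    import org.apache.hadoop.hbase.nio.ByteBuff;
    import org.apache.hadoop.io.WritableUtils;

    public static long readVLongSketch(ByteBuff in) {
      byte firstByte = in.get();
      int len = WritableUtils.decodeVIntSize(firstByte);
      if (len == 1) {
        return firstByte;                    // small values fit in one byte
      }
      long i = 0;
      for (int idx = 0; idx < len - 1; idx++) {
        i = (i << 8) | (in.get() & 0xFF);    // big-endian payload bytes
      }
      return WritableUtils.isNegativeVInt(firstByte) ? ~i : i;
    }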
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
index a0c7390..a167562 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java
@@ -341,7 +341,7 @@ public class OrderedBytes {
/**
* Perform unsigned comparison between two long values. Conforms to the same interface as
- * {@link Comparator#compare(Object, Object)}.
+ * {@link org.apache.hadoop.hbase.CellComparator#COMPARATOR#compare(Object, Object)}.
*/
private static int unsignedCmp(long x1, long x2) {
int cmp;
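A one-line illustration of unsigned long comparison under the
Comparator#compare contract (Long.compareUnsigned on Java 8+ is
equivalent; flipping the sign bit maps unsigned order onto signed order):

    private static int unsignedCmpSketch(long x1, long x2) {
      // XOR with Long.MIN_VALUE flips the sign bit of both operands.
      return Long.compare(x1 ^ Long.MIN_VALUE, x2 ^ Long.MIN_VALUE);
    }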
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
index 0bf8aea..fcad895 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
@@ -32,8 +32,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Common helpers for testing HBase that do not depend on specific server/etc. things.
- * @see {@link HBaseTestingUtility}
- *
+ * {@see org.apache.hadoop.hbase.HBaseTestingUtility}
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
index 4668468..aff3035 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
@@ -71,7 +71,7 @@ public interface CellSearcher extends ReversibleCellScanner {
/**
* <p>
* Note: Added for backwards compatibility with
- * {@link org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek}
+ * org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek(Cell)
* </p><p>
* Look for the key, but only look after the current position. Probably not needed for an
* efficient tree implementation, but is important for implementations without random access such
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
index aeae569..4cc459b 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
@@ -119,8 +119,7 @@ public class ProcedureWALFormatReader {
* purpose. If all procedures updated in a WAL are found to be obsolete, it can be safely deleted.
* (see {@link WALProcedureStore#removeInactiveLogs()}).
* However, we don't need deleted part of a WAL's tracker for this purpose, so we don't bother
- * re-building it. (To understand why, take a look at
- * {@link ProcedureStoreTracker.BitSetNode#subtract(ProcedureStoreTracker.BitSetNode)}).
+ * re-building it.
*/
private ProcedureStoreTracker localTracker;
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index 52185f1..ecd4401 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -466,8 +466,6 @@ public class HFileArchiver {
* <p>
* A best effort is made to delete each of the files, rather than bailing on the first failure.
* <p>
- * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
- * less resources, but is limited in terms of usefulness
* @param compactedFiles store files to delete from the file system.
* @throws IOException if a file cannot be deleted. All files will be attempted to deleted before
* throwing the exception, rather than failing at the first file.
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
index 63d88ef..55e3e1a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -67,7 +67,8 @@ class HFileArchiveManager {
/**
* Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
- * pass of the {@link HFileCleaner}, if the HFiles are retained by another cleaner.
+ * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another
+ * cleaner.
* @param table name of the table for which to disable hfile retention.
* @return <tt>this</tt> for chaining.
* @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner.
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index a130c21..79adcab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -283,7 +283,7 @@ public class RestoreTool {
/**
* Duplicate the backup image if it's on local cluster
- * @see HStore#bulkLoadHFile(String, long)
+ * @see HStore#bulkLoadHFile(org.apache.hadoop.hbase.regionserver.StoreFile)
* @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum)
* @param tableArchivePath archive path
* @return the new tableArchivePath
@@ -554,7 +554,7 @@ public class RestoreTool {
/**
* Prepare the table for bulkload, most codes copied from
- * {@link LoadIncrementalHFiles#createTable(String, String)}
+ * {@link LoadIncrementalHFiles#createTable(TableName, String, Admin)}
* @param conn connection
* @param tableBackupPath path
* @param tableName table name
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java
index a2711f2..db8d2e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java
@@ -27,15 +27,15 @@ import org.apache.hadoop.hbase.client.Put;
* any order.
* <p>
* A {@link Constraint} must be added to a table before the table is loaded via
- * {@link Constraints#add(HTableDescriptor, Class...)} or
- * {@link Constraints#add(HTableDescriptor,
+ * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class[])} or
+ * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor,
* org.apache.hadoop.hbase.util.Pair...)}
* (if you want to add a configuration with the {@link Constraint}). Constraints
* will be run in the order that they are added. Further, a Constraint will be
* configured before it is run (on load).
* <p>
- * See {@link Constraints#enableConstraint(HTableDescriptor, Class)} and
- * {@link Constraints#disableConstraint(HTableDescriptor, Class)} for
+ * See {@link Constraints#enableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} and
+ * {@link Constraints#disableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} for
* enabling/disabling of a given {@link Constraint} after it has been added.
* <p>
* If a {@link Put} is invalid, the Constraint should throw some sort of
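A hedged sketch of the add/disable flow this Javadoc walks through
(MyConstraint stands in for a hypothetical Constraint implementation):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.constraint.Constraint;
    import org.apache.hadoop.hbase.constraint.Constraints;

    static void wire(Class<? extends Constraint> myConstraint) throws IOException {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
      Constraints.add(desc, myConstraint);               // runs on each Put, in add order
      Constraints.disableConstraint(desc, myConstraint); // stays registered, stops running
    }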
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
index 96dfcbd..6c7ac37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
@@ -248,7 +248,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
}
/**
- * This is modeled after {@link BloomFilterChunk.MetaWriter} for simplicity,
+ * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity,
* although the two metadata formats do not have to be consistent. This
* does have to be consistent with how {@link
* CompoundBloomFilter#CompoundBloomFilter(DataInput,
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 0b140b6..4711cec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -697,7 +697,7 @@ public class HFileBlock implements Cacheable {
}
/**
- * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link CacheKey} when
+ * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link BlockCacheKey} when
* block is returned to the cache.
* @return the offset of this block in the file it was read from
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index b36c292..ae7bfda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.util.StringUtils;
* {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and
* {@link HFileWriterImpl}. Examples of how to use the reader can be
* found in {@link HFileReaderImpl} and
- * {@link org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex}.
+ * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.
*/
@InterfaceAudience.Private
public class HFileBlockIndex {
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index 47651af..46d29eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -76,8 +76,7 @@ public class RowCounter extends Configured implements Tool {
* @param values The columns.
* @param context The current context.
* @throws IOException When something is broken with the data.
- * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
- * org.apache.hadoop.mapreduce.Mapper.Context)
+ * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context)
*/
@Override
public void map(ImmutableBytesWritable row, Result values,
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 2f06972..7582d42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -212,7 +212,7 @@ public class TableNamespaceManager {
/**
* Create Namespace in a blocking manner. Keeps trying until
- * {@link ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires.
+ * {@link ClusterSchema#HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires.
* Note, by-passes notifying coprocessors and name checks. Use for system namespaces only.
*/
private void blockingCreateNamespace(final NamespaceDescriptor namespaceDescriptor)
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 5c92973..59ea067 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -980,8 +980,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
abstract double cost();
/**
- * Function to compute a scaled cost using {@link DescriptiveStatistics}. It
- * assumes that this is a zero sum set of costs. It assumes that the worst case
+ * Function to compute a scaled cost using {@link org.apache.commons.math3.stat.descriptive.DescriptiveStatistics}.
+ * It assumes that this is a zero sum set of costs. It assumes that the worst case
* possible is all of the elements in one region server and the rest having 0.
*
* @param stats the costs
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index 1ab2ef5..e9458d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -25,9 +25,9 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
* Wraps together the mutations which are applied as a batch to the region and their operation
* status and WALEdits.
* @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate(
- * ObserverContext, MiniBatchOperationInProgress)
+ * org.apache.hadoop.hbase.coprocessor.ObserverContext, MiniBatchOperationInProgress)
* @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
- * ObserverContext, MiniBatchOperationInProgress)
+ * org.apache.hadoop.hbase.coprocessor.ObserverContext, MiniBatchOperationInProgress)
* @param T Pair<Mutation, Integer> pair of Mutations and associated rowlock ids .
*/
@InterfaceAudience.LimitedPrivate("Coprocessors")
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index d91e79e..8f01a93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -208,7 +208,7 @@ public class StoreFileReader {
* Checks whether the given scan passes the Bloom filter (if present). Only
* checks Bloom filters for single-row or single-row-column scans. Bloom
* filter checking for multi-gets is implemented as part of the store
- * scanner system (see {@link StoreFileScanner#seekExactly}) and uses
+ * scanner system (see {@link StoreFileScanner#seek(Cell)} and uses
* the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)}
* and {@link #passesGeneralRowColBloomFilter(Cell)}.
*
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index f0e29c1..caf07a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -244,8 +244,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
/**
* Currently, we need to expose the writer's OutputStream to tests so that they can manipulate the
* default behavior (such as setting the maxRecoveryErrorCount value for example (see
- * {@link AbstractTestWALReplay#testReplayEditsWrittenIntoWAL()}). This is done using reflection
- * on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is
+ * {@see org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay#testReplayEditsWrittenIntoWAL()}). This is
+ * done using reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is
* removed.
* @return null if underlying stream is not ready.
*/
@@ -809,9 +809,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
* To start up the drama, Thread A creates an instance of this class each time it would do this
* zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only).
* Thread B notices the new instance (via reading a volatile reference or how ever) and it starts
- * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint()} when it cannot proceed
+ * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it cannot proceed
* until the Thread B 'safe point' is attained. Thread A will be held inside in
- * {@link #waitSafePoint()} until Thread B reaches the 'safe point'. Once there, Thread B frees
+ * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread B frees
* Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the 'safe
* point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it blocks
* here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it needs
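The zigzag described above is easiest to see as two one-shot latches; a
self-contained sketch (names echo the Javadoc, not FSHLog's actual fields):

    import java.util.concurrent.CountDownLatch;

    class SafePointSketch {
      private final CountDownLatch attained = new CountDownLatch(1);
      private final CountDownLatch released = new CountDownLatch(1);

      // Thread A: block until Thread B reaches the safe point.
      void waitSafePoint() throws InterruptedException {
        attained.await();
      }

      // Thread B: announce the safe point, then hold until A releases it.
      void safePointAttained() throws InterruptedException {
        attained.countDown();
        released.await();
      }

      // Thread A: free Thread B once the work at the safe point is done.
      void releaseSafePoint() {
        released.countDown();
      }
    }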
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
index cd73eb3..b065a59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
@@ -61,7 +61,7 @@ class SequenceIdAccounting {
* {@link #flushingSequenceIds}.
*
* <p>The two Maps are tied by this locking object EXCEPT when we go to update the lowest
- * entry; see {@link #lowest(byte[], Set, Long)}. In here is a putIfAbsent call on
+ * entry; see {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on
* {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest
* sequence id if we find that there is no entry for the current column family. There will be no
* entry only if we just came up OR we have moved aside current set of lowest sequence ids
@@ -403,8 +403,8 @@ class SequenceIdAccounting {
/**
* Iterates over the given Map and compares sequence ids with corresponding entries in
- * {@link #oldestUnflushedRegionSequenceIds}. If a region in
- * {@link #oldestUnflushedRegionSequenceIds} has a sequence id less than that passed in
+ * {@link #lowestUnflushedSequenceIds}. If a region in
+ * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in
* <code>sequenceids</code> then return it.
* @param sequenceids Sequenceids keyed by encoded region name.
* @return regions found in this instance with sequence ids less than those passed in.
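The putIfAbsent behavior referenced in the first hunk above, in miniature
(family name and sequence ids are illustrative):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    ConcurrentMap<String, Long> lowest = new ConcurrentHashMap<>();
    lowest.putIfAbsent("familyA", 42L);  // no entry yet: 42 is recorded
    lowest.putIfAbsent("familyA", 99L);  // entry exists: 42 is kept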
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index bc2e62e..d11fbe7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -77,7 +77,7 @@ class SyncFuture {
* Call this method to clear old usage and get it ready for new deploy.
* @param txid the new transaction id
* @param span current span, detached from caller. Don't forget to attach it when resuming after a
- * call to {@link #get()}.
+ * call to {@link #get(long)}.
* @return this
*/
synchronized SyncFuture reset(final long txid, Span span) {
@@ -107,7 +107,7 @@ class SyncFuture {
/**
* Retrieve the {@code span} instance from this Future. EventHandler calls this method to continue
* the span. Thread waiting on this Future musn't call this method until AFTER calling
- * {@link #get()} and the future has been released back to the originating thread.
+ * {@link #get(long)} and the future has been released back to the originating thread.
*/
synchronized Span getSpan() {
return this.span;
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java
index 649fbdd..5771593 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java
@@ -75,7 +75,7 @@ public class CoprocessorWhitelistMasterObserver implements MasterObserver {
* 1) a "*" to wildcard all coprocessor paths
* 2) a specific filesystem (e.g. hdfs://my-cluster/)
* 3) a wildcard path to be evaluated by
- * {@link FilenameUtils.wildcardMatch}
+ * {@link FilenameUtils#wildcardMatch(String, String)}
* path can specify scheme or not (e.g.
* "file:///usr/hbase/coprocessors" or for all
* filesystems "/usr/hbase/coprocessors")
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
index 0d539ce..84cda91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
@@ -85,7 +85,7 @@ public class TableAuthManager implements Closeable {
/**
* Returns a combined map of user and group permissions, with group names
- * distinguished according to {@link AuthUtil.isGroupPrincipal}
+ * distinguished according to {@link AuthUtil#isGroupPrincipal(String)}.
*/
public ListMultimap<String,T> getAllPermissions() {
ListMultimap<String,T> tmp = ArrayListMultimap.create();
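A quick illustration of the group-name convention AuthUtil uses (a leading
'@' marks a group principal):

    import org.apache.hadoop.hbase.AuthUtil;

    boolean g1 = AuthUtil.isGroupPrincipal("@admins");  // true: a group
    boolean g2 = AuthUtil.isGroupPrincipal("alice");    // false: a user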
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 8a4ed72..696ea18 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2059,7 +2059,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
/** A tracker for tracking and validating table rows
- * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}
+ * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
*/
public static class SeenRowTracker {
int dim = 'z' - 'a' + 1;
@@ -2326,7 +2326,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return digest.toString();
}
- /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
+ /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
static {
int i = 0;
@@ -3110,7 +3110,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
* regions have been all assigned.
- * @see #waitTableEnabled(Admin, byte[], long)
+ * @see #waitTableEnabled(TableName, long)
* @param table Table to wait on.
* @param timeoutMillis Time to wait on it being marked enabled.
* @throws InterruptedException
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
index 4a0b5c9..8bebd8d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
@@ -301,12 +301,12 @@ public class TestMetaTableLocator {
* want to pass a mocked HRS; can be null.
* @param client A mocked ClientProtocol instance, can be null
* @return Mock up a connection that returns a {@link Configuration} when
- * {@link HConnection#getConfiguration()} is called, a 'location' when
- * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
+ * {@link org.apache.hadoop.hbase.client.ClusterConnection#getConfiguration()} is called, a 'location' when
+ * {@link org.apache.hadoop.hbase.client.RegionLocator#getRegionLocation(byte[], boolean)} is called,
* and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
- * {@link HConnection#getAdmin(ServerName)} is called, returns the passed
+ * {@link org.apache.hadoop.hbase.client.ClusterConnection#getAdmin(ServerName)} is called, returns the passed
* {@link ClientProtos.ClientService.BlockingInterface} instance when
- * {@link HConnection#getClient(ServerName)} is called.
+ * {@link org.apache.hadoop.hbase.client.ClusterConnection#getClient(ServerName)} is called.
* @throws IOException
*/
private ClusterConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
index f3ea814..96bc811 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -447,7 +447,7 @@ public class TestPartialResultsFromClientSide {
}
/**
- * Test the method {@link Result#createCompleteResult(List)}
+ * Test the method {@link Result#createCompleteResult(Iterable)}
* @throws Exception
*/
@Test
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
index 88f5cc4..be033e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
@@ -571,7 +571,7 @@ public class TestSerialization {
protected static final byte [][] COLUMNS = {fam1, fam2, fam3};
/**
- * Create a table of name <code>name</code> with {@link COLUMNS} for
+ * Create a table of name <code>name</code> with {@link #COLUMNS} for
* families.
* @param name Name to give table.
* @return Column descriptor.
@@ -581,7 +581,7 @@ public class TestSerialization {
}
/**
- * Create a table of name <code>name</code> with {@link COLUMNS} for
+ * Create a table of name <code>name</code> with {@link #COLUMNS} for
* families.
* @param name Name to give table.
* @param versions How many versions to allow per column.
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
index a25c4af..807d59a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
@@ -41,7 +41,7 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
/**
- * Run tests related to {@link TimestampsFilter} using HBase client APIs.
+ * Run tests related to {@link org.apache.hadoop.hbase.filter.TimestampsFilter} using HBase client APIs.
* Sets up the HBase mini cluster once at start. Each creates a table
* named for the method and does its stuff against that.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java
index dbda361..60e3514 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java
@@ -40,7 +40,7 @@ public class TestFirstKeyValueMatchingQualifiersFilter extends TestCase {
/**
* Test the functionality of
- * {@link FirstKeyValueMatchingQualifiersFilter#filterKeyValue(KeyValue)}
+ * {@link FirstKeyValueMatchingQualifiersFilter#filterKeyValue(org.apache.hadoop.hbase.Cell)}
*
* @throws Exception
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
index d654bce..6531d2c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
@@ -55,7 +55,7 @@ import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/**
- * Test {@link HFileScanner#seekTo(byte[])} and its variants.
+ * Test {@link HFileScanner#seekTo(Cell)} and its variants.
*/
@Category({IOTests.class, SmallTests.class})
@RunWith(Parameterized.class)
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 3c1bed8..20fc992 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -113,7 +113,7 @@ import org.junit.rules.TestRule;
import org.mockito.Mockito;
/**
- * Simple test for {@link CellSortReducer} and {@link HFileOutputFormat2}.
+ * Simple test for {@link HFileOutputFormat2}.
* Sets up and runs a mapreduce job that writes hfile output.
* Creates a few inner classes to implement splits and an inputformat that
* emits keys and values like those of {@link PerformanceEvaluation}.
@@ -684,9 +684,8 @@ public class TestHFileOutputFormat2 {
}
/**
- * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table,
- * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
- * (Configuration)}.
+ * Test for {@link HFileOutputFormat2#configureCompression(Configuration, HTableDescriptor)} and
+ * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}.
* Tests that the compression map is correctly serialized into
* and deserialized from configuration
*
@@ -754,9 +753,8 @@ public class TestHFileOutputFormat2 {
/**
- * Test for {@link HFileOutputFormat2#configureBloomType(org.apache.hadoop.hbase.client.Table,
- * Configuration)} and {@link HFileOutputFormat2#createFamilyBloomTypeMap
- * (Configuration)}.
+ * Test for {@link HFileOutputFormat2#configureBloomType(HTableDescriptor, Configuration)} and
+ * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}.
* Tests that the compression map is correctly serialized into
* and deserialized from configuration
*
@@ -824,9 +822,8 @@ public class TestHFileOutputFormat2 {
}
/**
- * Test for {@link HFileOutputFormat2#configureBlockSize(org.apache.hadoop.hbase.client.Table,
- * Configuration)} and {@link HFileOutputFormat2#createFamilyBlockSizeMap
- * (Configuration)}.
+ * Test for {@link HFileOutputFormat2#configureBlockSize(HTableDescriptor, Configuration)} and
+ * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}.
* Tests that the compression map is correctly serialized into
* and deserialized from configuration
*
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 1866a35..dc59817 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -700,7 +700,7 @@ public class TestImportExport {
}
/**
- * This listens to the {@link #visitLogEntryBeforeWrite(HTableDescriptor, WALKey, WALEdit)} to
+ * This listens to the {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} to
* identify that an entry is written to the Write Ahead Log for the given table.
*/
private static class TableWALActionListener extends WALActionsListener.Base {
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index b653e3f..78c8214 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -139,7 +139,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
/**
* Map of regions to map of rows and {@link Result}. Used as data source when
- * {@link MockRegionServer#get(byte[], Get)} is called. Because we have a byte
+ * {@link #get(RpcController, GetRequest)} is called. Because we have a byte
* key, need to use TreeMap and provide a Comparator. Use
* {@link #setGetResult(byte[], byte[], Result)} filling this map.
*/
@@ -190,7 +190,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
}
/**
- * Use this method filling the backing data source used by {@link #get(byte[], Get)}
+ * Use this method to fill the backing data source used by {@link #get(RpcController, GetRequest)}
* @param regionName
* @param row
* @param r
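The comparator point in this javadoc is worth a concrete line or two: byte[] compares by reference, so a plain HashMap or natural-ordered TreeMap cannot key on it. A minimal sketch of the idiom the mock relies on:

import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

// byte[] keys need content-based comparison; Bytes.BYTES_COMPARATOR provides it.
TreeMap<byte[], String> rows = new TreeMap<>(Bytes.BYTES_COMPARATOR);
rows.put(Bytes.toBytes("row1"), "value1");
// A different byte[] instance with the same content still finds the entry:
String v = rows.get(Bytes.toBytes("row1")); // "value1"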
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
index f33bc98..ec7ffe6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
@@ -46,7 +46,7 @@ import org.junit.After;
import org.junit.Test;
/**
- * Run tests that use the HBase clients; {@link HTable}.
+ * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.HTable}.
* Sets up the HBase mini cluster once at start and runs through all client tests.
* Each creates a table named for the method and does its stuff against that.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java
index 0d31108..e2e641e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java
@@ -224,8 +224,8 @@ public class TestProcedureMember {
/**
* Fail correctly if coordinator aborts the procedure. The subprocedure will not interrupt a
- * running {@link Subprocedure#prepare} -- prepare needs to finish first, and the the abort
- * is checked. Thus, the {@link Subprocedure#prepare} should succeed but later get rolled back
+ * running {@link Subprocedure#acquireBarrier()} -- prepare needs to finish first, and then the abort
+ * is checked. Thus, the {@link Subprocedure#acquireBarrier()} should succeed but later get rolled back
* via {@link Subprocedure#cleanup}.
*/
@Test(timeout = 60000)
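The ordering this test asserts can be summarized in a few lines. This is a hedged sketch with hypothetical helper names (runBarrierPhase, coordinatorAborted), not the real ProcedureMember code path:

// Hypothetical sketch of the contract: acquireBarrier() is never interrupted;
// a coordinator abort is only observed after it returns, then cleanup() rolls back.
void runBarrierPhase(Subprocedure sub) throws Exception {
  sub.acquireBarrier();                  // runs to completion first
  if (coordinatorAborted()) {            // abort is checked only afterwards
    sub.cleanup(new Exception("coordinator abort")); // roll back the acquired work
  }
}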
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
index dd20259..a074a9a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
@@ -69,7 +69,7 @@ public class DataBlockEncodingTool {
/**
* How many times to run the benchmark. More times means better data in terms
* of statistics but slower execution. Has to be strictly larger than
- * {@link DEFAULT_BENCHMARK_N_OMIT}.
+ * {@link #DEFAULT_BENCHMARK_N_OMIT}.
*/
private static final int DEFAULT_BENCHMARK_N_TIMES = 12;
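In other words, the omitted warm-up runs are subtracted from the total, so the constant must leave at least one measured run. A tiny illustration (the omit value shown here is illustrative):

// If DEFAULT_BENCHMARK_N_OMIT were, say, 2, then with N_TIMES = 12:
int measuredRuns = 12 - 2; // 10 runs contribute to the reported statistics
// N_TIMES <= N_OMIT would leave nothing to measure, hence "strictly larger".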
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
index 036c11c..07c141c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
@@ -24,7 +24,6 @@ import java.util.List;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -32,7 +31,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequ
/**
* A region server that will OOME.
- * Everytime {@link #put(regionName, Durability)} is called, we add
+ * Every time {@link #put(byte[], Put)} is called, we
* keep around a reference to the batch. Use this class to test OOME extremes.
* Needs to be started manually as in
* <code>${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMERegionServer start</code>.
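The OOME mechanism described above is just a deliberate leak: each incoming batch is also retained in a member list so the heap can only grow. A minimal, self-contained sketch of the idea (class and method names here are illustrative, not the real server code):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Put;

class DeliberateLeak {
  // Never cleared, so each call pins more memory until OutOfMemoryError.
  private final List<Put> retained = new ArrayList<>();

  void onPut(Put put) {
    retained.add(put);
  }
}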
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 358aabd..c04f2d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -88,12 +88,6 @@ import com.google.common.base.Joiner;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-/**
- * Like {@link TestRegionMergeTransaction} in that we're testing
- * {@link RegionMergeTransactionImpl} only the below tests are against a running
- * cluster where {@link TestRegionMergeTransaction} is tests against bare
- * {@link HRegion}.
- */
@Category({RegionServerTests.class, MediumTests.class})
public class TestRegionMergeTransactionOnCluster {
private static final Log LOG = LogFactory
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
index 9a5e6f1..27c4282 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.util.ToolRunner;
/**
* A command-line utility that reads, writes, and verifies data. Unlike
- * {@link PerformanceEvaluation}, this tool validates the data written,
+ * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written,
* and supports simultaneously writing and reading the same set of keys.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
http://git-wip-us.apache.org/repos/asf/hbase/blob/55d6dcaf/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 2b68719..e4e0af5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -78,8 +78,8 @@ public abstract class MultiThreadedAction {
* Default implementation of LoadTestDataGenerator that uses LoadTestKVGenerator, fixed
* set of column families, and random number of columns in range. The table for it can
* be created manually or, for example, via
- * {@link HBaseTestingUtility#createPreSplitLoadTestTable(
- * org.apache.hadoop.hbase.Configuration, byte[], byte[], Algorithm, DataBlockEncoding)}
+ * {@link org.apache.hadoop.hbase.HBaseTestingUtility#createPreSplitLoadTestTable(Configuration, TableName, byte[],
+ * org.apache.hadoop.hbase.io.compress.Compression.Algorithm, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding)}
*/
public static class DefaultDataGenerator extends LoadTestDataGenerator {
private byte[][] columnFamilies = null;
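A hedged usage sketch of the corrected reference, with illustrative table and family names and exception handling omitted (the method is static and returns the number of regions created):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniCluster();
int regions = HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(),
    TableName.valueOf("loadtest"), Bytes.toBytes("cf"),
    Compression.Algorithm.NONE, DataBlockEncoding.NONE);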
[12/23] hbase git commit: HBASE-17815 Remove the unused field in
PrefixTreeSeeker
Posted by sy...@apache.org.
HBASE-17815 Remove the unused field in PrefixTreeSeeker
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f2d1b8db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f2d1b8db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f2d1b8db
Branch: refs/heads/hbase-12439
Commit: f2d1b8db89cee7dad675639a50dab9f3c08f219f
Parents: 9410709
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Wed Mar 22 01:37:58 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Thu Mar 23 03:07:49 2017 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/f2d1b8db/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
index addb9f1..2433e8b 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.util.ClassSize;
@InterfaceAudience.Private
public class PrefixTreeSeeker implements EncodedSeeker {
- protected ByteBuffer block;
protected boolean includeMvccVersion;
protected PrefixTreeArraySearcher ptSearcher;
[18/23] hbase git commit: HBASE-13395 Removed HTableInterface
Posted by sy...@apache.org.
HBASE-13395 Removed HTableInterface
Signed-off-by: CHIA-PING TSAI <ch...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50e98251
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50e98251
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50e98251
Branch: refs/heads/hbase-12439
Commit: 50e9825139d9abfd280eb7a930c8c6a96e9e68a6
Parents: 6bd3109
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Fri Nov 18 23:53:36 2016 +0100
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Sun Mar 26 00:04:21 2017 +0800
----------------------------------------------------------------------
.../hbase/client/ConnectionImplementation.java | 4 +-
.../hadoop/hbase/client/HTableInterface.java | 138 -------------------
src/main/asciidoc/_chapters/architecture.adoc | 8 +-
src/main/asciidoc/_chapters/cp.adoc | 3 +-
src/main/asciidoc/_chapters/unit_testing.adoc | 38 ++---
5 files changed, 25 insertions(+), 166 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/50e98251/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index adf1496..99feb14 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -170,10 +170,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
// be waiting for the master lock => deadlock.
private final Object masterAndZKLock = new Object();
- // thread executor shared by all HTableInterface instances created
+ // thread executor shared by all Table instances created
// by this connection
private volatile ExecutorService batchPool = null;
- // meta thread executor shared by all HTableInterface instances created
+ // meta thread executor shared by all Table instances created
// by this connection
private volatile ExecutorService metaLookupPool = null;
private volatile boolean cleanupPool = false;
http://git-wip-us.apache.org/repos/asf/hbase/blob/50e98251/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
deleted file mode 100644
index 9d41218..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * Used to communicate with a single HBase table.
- * Obtain an instance from a {@link Connection}.
- *
- * @since 0.21.0
- * @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead
- */
-@Deprecated
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public interface HTableInterface extends Table {
-
- /**
- * Gets the name of this table.
- *
- * @return the table name.
- * @deprecated Use {@link #getName()} instead
- */
- @Deprecated
- byte[] getTableName();
-
- /**
- * Turns 'auto-flush' on or off.
- * <p>
- * When enabled (default), {@link Put} operations don't get buffered/delayed
- * and are immediately executed. Failed operations are not retried. This is
- * slower but safer.
- * <p>
- * Turning off {@code #autoFlush} means that multiple {@link Put}s will be
- * accepted before any RPC is actually sent to do the write operations. If the
- * application dies before pending writes get flushed to HBase, data will be
- * lost.
- * <p>
- * When you turn {@code #autoFlush} off, you should also consider the
- * {@code #clearBufferOnFail} option. By default, asynchronous {@link Put}
- * requests will be retried on failure until successful. However, this can
- * pollute the writeBuffer and slow down batching performance. Additionally,
- * you may want to issue a number of Put requests and call
- * {@link #flushCommits()} as a barrier. In both use cases, consider setting
- * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
- * has been called, regardless of success.
- * <p>
- * In other words, if you call {@code #setAutoFlush(false)}; HBase will retry N time for each
- * flushCommit, including the last one when closing the table. This is NOT recommended,
- * most of the time you want to call {@code #setAutoFlush(false, true)}.
- *
- * @param autoFlush
- * Whether or not to enable 'auto-flush'.
- * @param clearBufferOnFail
- * Whether to keep Put failures in the writeBuffer. If autoFlush is true, then
- * the value of this parameter is ignored and clearBufferOnFail is set to true.
- * Setting clearBufferOnFail to false is deprecated since 0.96.
- * @deprecated in 0.99 since setting clearBufferOnFail is deprecated.
- * @see BufferedMutator#flush()
- */
- @Deprecated
- void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);
-
- /**
- * Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail}.
- * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Move on to
- * {@link BufferedMutator}
- */
- @Deprecated
- void setAutoFlushTo(boolean autoFlush);
-
- /**
- * Tells whether or not 'auto-flush' is turned on.
- *
- * @return {@code true} if 'auto-flush' is enabled (default), meaning
- * {@link Put} operations don't get buffered/delayed and are immediately
- * executed.
- * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator}
- */
- @Deprecated
- boolean isAutoFlush();
-
- /**
- * Executes all the buffered {@link Put} operations.
- * <p>
- * This method gets called once automatically for every {@link Put} or batch
- * of {@link Put}s (when <code>put(List<Put>)</code> is used) when
- * {@link #isAutoFlush} is {@code true}.
- * @throws IOException if a remote or network exception occurs.
- * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#flush()}
- */
- @Deprecated
- void flushCommits() throws IOException;
-
- /**
- * Returns the maximum size in bytes of the write buffer for this HTable.
- * <p>
- * The default value comes from the configuration parameter
- * {@code hbase.client.write.buffer}.
- * @return The size of the write buffer in bytes.
- * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#getWriteBufferSize()}
- */
- @Deprecated
- long getWriteBufferSize();
-
- /**
- * Sets the size of the buffer in bytes.
- * <p>
- * If the new size is less than the current amount of data in the
- * write buffer, the buffer gets flushed.
- * @param writeBufferSize The new write buffer size, in bytes.
- * @throws IOException if a remote or network exception occurs.
- * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator} and
- * {@link BufferedMutatorParams#writeBufferSize(long)}
- */
- @Deprecated
- void setWriteBufferSize(long writeBufferSize) throws IOException;
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/50e98251/src/main/asciidoc/_chapters/architecture.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 9768c96..773d237 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -227,8 +227,6 @@ try (Connection connection = ConnectionFactory.createConnection(conf)) {
----
====
-Constructing HTableInterface implementation is very lightweight and resources are controlled.
-
.`HTablePool` is Deprecated
[WARNING]
====
@@ -398,7 +396,7 @@ Example: Find all columns in a row and family that start with "abc"
[source,java]
----
-HTableInterface t = ...;
+Table t = ...;
byte[] row = ...;
byte[] family = ...;
byte[] prefix = Bytes.toBytes("abc");
@@ -428,7 +426,7 @@ Example: Find all columns in a row and family that start with "abc" or "xyz"
[source,java]
----
-HTableInterface t = ...;
+Table t = ...;
byte[] row = ...;
byte[] family = ...;
byte[][] prefixes = new byte[][] {Bytes.toBytes("abc"), Bytes.toBytes("xyz")};
@@ -463,7 +461,7 @@ Example: Find all columns in a row and family between "bbbb" (inclusive) and "bb
[source,java]
----
-HTableInterface t = ...;
+Table t = ...;
byte[] row = ...;
byte[] family = ...;
byte[] startColumn = Bytes.toBytes("bbbb");
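For readers following the adoc, the hunk above cuts off before the filter is applied. The full pattern, in the same placeholder style the adoc examples use and with the end column left elided since the excerpt truncates it, looks roughly like:

Table t = ...;
byte[] row = ...;
byte[] family = ...;
byte[] startColumn = Bytes.toBytes("bbbb");
byte[] endColumn = ...; // truncated in the excerpt above
Scan scan = new Scan(row, row); // scan a single row
scan.addFamily(family);
scan.setFilter(new ColumnRangeFilter(startColumn, true, endColumn, true));
ResultScanner rs = t.getScanner(scan);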
http://git-wip-us.apache.org/repos/asf/hbase/blob/50e98251/src/main/asciidoc/_chapters/cp.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/cp.adoc b/src/main/asciidoc/_chapters/cp.adoc
index 47f92bb..d3fcd47 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -180,8 +180,7 @@ In contrast to observer coprocessors, where your code is run transparently, endp
coprocessors must be explicitly invoked using the
link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#coprocessorService%28java.lang.Class,%20byte%5B%5D,%20byte%5B%5D,%20org.apache.hadoop.hbase.client.coprocessor.Batch.Call%29[CoprocessorService()]
method available in
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html[Table],
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTableInterface.html[HTableInterface],
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html[Table]
or
link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTable.html[HTable].
http://git-wip-us.apache.org/repos/asf/hbase/blob/50e98251/src/main/asciidoc/_chapters/unit_testing.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/unit_testing.adoc b/src/main/asciidoc/_chapters/unit_testing.adoc
index 0c4d812..6131d5a 100644
--- a/src/main/asciidoc/_chapters/unit_testing.adoc
+++ b/src/main/asciidoc/_chapters/unit_testing.adoc
@@ -295,28 +295,28 @@ public class MyHBaseIntegrationTest {
@Before
public void setup() throws Exception {
- utility = new HBaseTestingUtility();
- utility.startMiniCluster();
+ utility = new HBaseTestingUtility();
+ utility.startMiniCluster();
}
@Test
- public void testInsert() throws Exception {
- HTableInterface table = utility.createTable(Bytes.toBytes("MyTest"), CF);
- HBaseTestObj obj = new HBaseTestObj();
- obj.setRowKey("ROWKEY-1");
- obj.setData1("DATA-1");
- obj.setData2("DATA-2");
- MyHBaseDAO.insertRecord(table, obj);
- Get get1 = new Get(Bytes.toBytes(obj.getRowKey()));
- get1.addColumn(CF, CQ1);
- Result result1 = table.get(get1);
- assertEquals(Bytes.toString(result1.getRow()), obj.getRowKey());
- assertEquals(Bytes.toString(result1.value()), obj.getData1());
- Get get2 = new Get(Bytes.toBytes(obj.getRowKey()));
- get2.addColumn(CF, CQ2);
- Result result2 = table.get(get2);
- assertEquals(Bytes.toString(result2.getRow()), obj.getRowKey());
- assertEquals(Bytes.toString(result2.value()), obj.getData2());
+ public void testInsert() throws Exception {
+ Table table = utility.createTable(Bytes.toBytes("MyTest"), CF);
+ HBaseTestObj obj = new HBaseTestObj();
+ obj.setRowKey("ROWKEY-1");
+ obj.setData1("DATA-1");
+ obj.setData2("DATA-2");
+ MyHBaseDAO.insertRecord(table, obj);
+ Get get1 = new Get(Bytes.toBytes(obj.getRowKey()));
+ get1.addColumn(CF, CQ1);
+ Result result1 = table.get(get1);
+ assertEquals(Bytes.toString(result1.getRow()), obj.getRowKey());
+ assertEquals(Bytes.toString(result1.value()), obj.getData1());
+ Get get2 = new Get(Bytes.toBytes(obj.getRowKey()));
+ get2.addColumn(CF, CQ2);
+ Result result2 = table.get(get2);
+ assertEquals(Bytes.toString(result2.getRow()), obj.getRowKey());
+ assertEquals(Bytes.toString(result2.value()), obj.getData2());
}
}
----
[08/23] hbase git commit: HBASE-17655 Removing MemStoreScanner and
SnapshotScanner
Posted by sy...@apache.org.
HBASE-17655 Removing MemStoreScanner and SnapshotScanner
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f4ae0a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f4ae0a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f4ae0a0
Branch: refs/heads/hbase-12439
Commit: 8f4ae0a0dcb658c4fe669bc4cdc68ad8e6219daf
Parents: cc59fe4
Author: eshcar <es...@yahoo-inc.com>
Authored: Tue Mar 21 12:32:59 2017 +0200
Committer: eshcar <es...@yahoo-inc.com>
Committed: Tue Mar 21 12:35:47 2017 +0200
----------------------------------------------------------------------
.../example/ZooKeeperScanPolicyObserver.java | 4 +-
.../hbase/coprocessor/RegionObserver.java | 35 +-
.../hbase/mob/DefaultMobStoreFlusher.java | 2 +-
.../hbase/regionserver/AbstractMemStore.java | 14 +
.../hbase/regionserver/CompactingMemStore.java | 21 +-
.../regionserver/CompositeImmutableSegment.java | 33 +-
.../hbase/regionserver/DefaultMemStore.java | 15 +-
.../hbase/regionserver/DefaultStoreFlusher.java | 2 +-
.../hbase/regionserver/ImmutableSegment.java | 12 +-
.../hbase/regionserver/MemStoreCompactor.java | 2 +-
.../MemStoreCompactorSegmentsIterator.java | 17 +-
.../MemStoreMergerSegmentsIterator.java | 52 ++-
.../hbase/regionserver/MemStoreScanner.java | 334 -------------------
.../regionserver/MemStoreSegmentsIterator.java | 23 +-
.../hbase/regionserver/MemStoreSnapshot.java | 15 +-
.../regionserver/RegionCoprocessorHost.java | 7 +-
.../hadoop/hbase/regionserver/Segment.java | 8 +-
.../hbase/regionserver/SegmentScanner.java | 13 +-
.../hbase/regionserver/SnapshotScanner.java | 105 ------
.../hadoop/hbase/regionserver/StoreFlusher.java | 8 +-
.../hbase/regionserver/StripeStoreFlusher.java | 2 +-
.../hbase/coprocessor/SimpleRegionObserver.java | 2 +-
.../TestRegionObserverScannerOpenHook.java | 6 +-
.../regionserver/NoOpScanPolicyObserver.java | 4 +-
.../regionserver/TestCompactingMemStore.java | 30 +-
.../TestCompactingToCellArrayMapMemStore.java | 32 +-
.../hbase/regionserver/TestDefaultMemStore.java | 20 +-
.../regionserver/TestMemStoreChunkPool.java | 14 +-
.../regionserver/TestReversibleScanners.java | 66 +++-
.../hbase/util/TestCoprocessorScanPolicy.java | 5 +-
30 files changed, 262 insertions(+), 641 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 2343c1d..b7df9b4 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -188,7 +188,7 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
if (scanInfo == null) {
// take default action
@@ -196,7 +196,7 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
}
Scan scan = new Scan();
scan.setMaxVersions(scanInfo.getMaxVersions());
- return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
+ return new StoreScanner(store, scanInfo, scan, scanners,
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index a3db3b1..e36feea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.coprocessor;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
@@ -128,16 +129,16 @@ public interface RegionObserver extends Coprocessor {
* effect in this hook.
* @param c the environment provided by the region server
* @param store the store being flushed
- * @param memstoreScanner the scanner for the memstore that is flushed
+ * @param scanners the scanners for the memstore that is flushed
* @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
* @return the scanner to use during the flush. {@code null} if the default implementation
* is to be used.
- * @deprecated Use {@link #preFlushScannerOpen(ObserverContext, Store, KeyValueScanner,
+ * @deprecated Use {@link #preFlushScannerOpen(ObserverContext, Store, List,
* InternalScanner, long)}
*/
@Deprecated
default InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final KeyValueScanner memstoreScanner, final InternalScanner s)
+ final Store store, final List<KeyValueScanner> scanners, final InternalScanner s)
throws IOException {
return s;
}
@@ -151,16 +152,32 @@ public interface RegionObserver extends Coprocessor {
* effect in this hook.
* @param c the environment provided by the region server
* @param store the store being flushed
- * @param memstoreScanner the scanner for the memstore that is flushed
+ * @param scanners the scanners for the memstore that is flushed
* @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
* @param readPoint the readpoint to create scanner
* @return the scanner to use during the flush. {@code null} if the default implementation
* is to be used.
*/
default InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final KeyValueScanner memstoreScanner, final InternalScanner s,
+ final Store store, final List<KeyValueScanner> scanners, final InternalScanner s,
final long readPoint) throws IOException {
- return preFlushScannerOpen(c, store, memstoreScanner, s);
+ return preFlushScannerOpen(c, store, scanners, s);
+ }
+
+ /**
+ * Maintain backward compatibility.
+ * @param c the environment provided by the region server
+ * @param store the store being flushed
+ * @param scanner the scanner for the memstore that is flushed
+ * @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain
+ * @param readPoint the readpoint to create scanner
+ * @return the scanner to use during the flush. {@code null} if the default implementation
+ * is to be used.
+ */
+ default InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final KeyValueScanner scanner, final InternalScanner s,
+ final long readPoint) throws IOException {
+ return preFlushScannerOpen(c, store, Collections.singletonList(scanner), s, readPoint);
}
/**
@@ -1113,8 +1130,7 @@ public interface RegionObserver extends Coprocessor {
* Called before a store opens a new scanner.
* This hook is called when a "user" scanner is opened.
* <p>
- * See {@link #preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner,
- * long)} and {@link #preCompactScannerOpen(ObserverContext,
+ * See {@link #preFlushScannerOpen(ObserverContext, Store, List, InternalScanner, long)} and {@link #preCompactScannerOpen(ObserverContext,
* Store, List, ScanType, long, InternalScanner, CompactionRequest, long)}
* to override scanners created for flushes or compactions, resp.
* <p>
@@ -1145,8 +1161,7 @@ public interface RegionObserver extends Coprocessor {
* Called before a store opens a new scanner.
* This hook is called when a "user" scanner is opened.
* <p>
- * See {@link #preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner,
- * long)} and {@link #preCompactScannerOpen(ObserverContext,
+ * See {@link #preFlushScannerOpen(ObserverContext, Store, List, InternalScanner, long)} and {@link #preCompactScannerOpen(ObserverContext,
* Store, List, ScanType, long, InternalScanner, CompactionRequest, long)}
* to override scanners created for flushes or compactions, resp.
* <p>
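For coprocessor authors tracking this change, migrating means overriding the List-based overload. A hedged sketch mirroring the StoreScanner construction used elsewhere in this commit (e.g. ZooKeeperScanPolicyObserver):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;

public class ExampleFlushObserver implements RegionObserver {
  @Override
  public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
      Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
      throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions(store.getScanInfo().getMaxVersions());
    // Same construction pattern as the flush hooks elsewhere in this commit.
    return new StoreScanner(store, store.getScanInfo(), scan, scanners,
        ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
        HConstants.OLDEST_TIMESTAMP);
  }
}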
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 2456a41..1a1c5a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -104,7 +104,7 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
// Use a store scanner to find which rows to flush.
long smallestReadPoint = store.getSmallestReadPoint();
- InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
+ InternalScanner scanner = createScanner(snapshot.getScanners(), smallestReadPoint);
if (scanner == null) {
return result; // NULL scanner returned from coprocessor hooks means skip normal processing
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index d44486c..cff2b27 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -60,6 +60,20 @@ public abstract class AbstractMemStore implements MemStore {
public final static long DEEP_OVERHEAD = FIXED_OVERHEAD;
+ public static long addToScanners(List<? extends Segment> segments, long readPt, long order,
+ List<KeyValueScanner> scanners) {
+ for (Segment item : segments) {
+ order = addToScanners(item, readPt, order, scanners);
+ }
+ return order;
+ }
+
+ protected static long addToScanners(Segment segment, long readPt, long order,
+ List<KeyValueScanner> scanners) {
+ scanners.add(segment.getScanner(readPt, order));
+ return order - 1;
+ }
+
protected AbstractMemStore(final Configuration conf, final CellComparator c) {
this.conf = conf;
this.comparator = c;
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 926b3f7..26b2f49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -318,21 +317,15 @@ public class CompactingMemStore extends AbstractMemStore {
*/
public List<KeyValueScanner> getScanners(long readPt) throws IOException {
List<? extends Segment> pipelineList = pipeline.getSegments();
- int order = pipelineList.size() + snapshot.getNumOfSegments();
+ List<? extends Segment> snapshotList = snapshot.getAllSegments();
+ long order = 1 + pipelineList.size() + snapshotList.size();
// The list of elements in pipeline + the active element + the snapshot segment
- // TODO : This will change when the snapshot is made of more than one element
// The order is the Segment ordinal
- List<KeyValueScanner> list = new ArrayList<>(order+1);
- list.add(this.active.getScanner(readPt, order + 1));
- for (Segment item : pipelineList) {
- list.add(item.getScanner(readPt, order));
- order--;
- }
- for (Segment item : snapshot.getAllSegments()) {
- list.add(item.getScanner(readPt, order));
- order--;
- }
- return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(getComparator(), list));
+ List<KeyValueScanner> list = new ArrayList<KeyValueScanner>((int) order);
+ order = addToScanners(active, readPt, order, list);
+ order = addToScanners(pipelineList, readPt, order, list);
+ addToScanners(snapshotList, readPt, order, list);
+ return list;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index eeade4f..2f89ec7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Scan;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
@@ -72,16 +71,6 @@ public class CompositeImmutableSegment extends ImmutableSegment {
}
/**
- * Builds a special scanner for the MemStoreSnapshot object that is different than the
- * general segment scanner.
- * @return a special scanner for the MemStoreSnapshot object
- */
- @Override
- public KeyValueScanner getSnapshotScanner() {
- return getScanner(Long.MAX_VALUE, Long.MAX_VALUE);
- }
-
- /**
* @return whether the segment has any cells
*/
@Override
@@ -148,8 +137,7 @@ public class CompositeImmutableSegment extends ImmutableSegment {
*/
@Override
public KeyValueScanner getScanner(long readPoint) {
- // Long.MAX_VALUE is DEFAULT_SCANNER_ORDER
- return getScanner(readPoint,Long.MAX_VALUE);
+ throw new IllegalStateException("Not supported by CompositeImmutableScanner");
}
/**
@@ -158,19 +146,14 @@ public class CompositeImmutableSegment extends ImmutableSegment {
*/
@Override
public KeyValueScanner getScanner(long readPoint, long order) {
- KeyValueScanner resultScanner;
- List<KeyValueScanner> list = new ArrayList<>(segments.size());
- for (ImmutableSegment s : segments) {
- list.add(s.getScanner(readPoint, order));
- }
-
- try {
- resultScanner = new MemStoreScanner(getComparator(), list);
- } catch (IOException ie) {
- throw new IllegalStateException(ie);
- }
+ throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+ }
- return resultScanner;
+ @Override
+ public List<KeyValueScanner> getScanners(long readPoint, long order) {
+ List<KeyValueScanner> list = new ArrayList<>(segments.size());
+ AbstractMemStore.addToScanners(segments, readPoint, order, list);
+ return list;
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index b3e9c65..d1f6b1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -75,10 +74,6 @@ public class DefaultMemStore extends AbstractMemStore {
super(conf, c);
}
- void dump() {
- super.dump(LOG);
- }
-
/**
* Creates a snapshot of the current memstore.
* Snapshot must be cleared by call to {@link #clearSnapshot(long)}
@@ -129,11 +124,11 @@ public class DefaultMemStore extends AbstractMemStore {
* Scanners are ordered from 0 (oldest) to newest in increasing order.
*/
public List<KeyValueScanner> getScanners(long readPt) throws IOException {
- List<KeyValueScanner> list = new ArrayList<>(2);
- list.add(this.active.getScanner(readPt, 1));
- list.add(this.snapshot.getScanner(readPt, 0));
- return Collections.<KeyValueScanner> singletonList(
- new MemStoreScanner(getComparator(), list));
+ List<KeyValueScanner> list = new ArrayList<>();
+ long order = snapshot.getNumOfSegments();
+ order = addToScanners(active, readPt, order, list);
+ addToScanners(snapshot.getAllSegments(), readPt, order, list);
+ return list;
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index 8cb3a1d..ef49f29 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -52,7 +52,7 @@ public class DefaultStoreFlusher extends StoreFlusher {
// Use a store scanner to find which rows to flush.
long smallestReadPoint = store.getSmallestReadPoint();
- InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
+ InternalScanner scanner = createScanner(snapshot.getScanners(), smallestReadPoint);
if (scanner == null) {
return result; // NULL scanner returned from coprocessor hooks means skip normal processing
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index c8d27b2..f1273a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -34,9 +34,7 @@ import java.util.List;
/**
* ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
- * and is not needed for a {@link MutableSegment}. Specifically, the method
- * {@link ImmutableSegment#getSnapshotScanner()} builds a special scanner for the
- * {@link MemStoreSnapshot} object.
+ * and is not needed for a {@link MutableSegment}.
*/
@InterfaceAudience.Private
public class ImmutableSegment extends Segment {
@@ -130,14 +128,6 @@ public class ImmutableSegment extends Segment {
}
///////////////////// PUBLIC METHODS /////////////////////
- /**
- * Builds a special scanner for the MemStoreSnapshot object that is different than the
- * general segment scanner.
- * @return a special scanner for the MemStoreSnapshot object
- */
- public KeyValueScanner getSnapshotScanner() {
- return new SnapshotScanner(this);
- }
@Override
public boolean shouldSeek(Scan scan, long oldestUnexpiredTS) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index c435098..dfa7d18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -252,7 +252,7 @@ public class MemStoreCompactor {
iterator =
new MemStoreMergerSegmentsIterator(versionedList.getStoreSegments(),
compactingMemStore.getComparator(),
- compactionKVMax, compactingMemStore.getStore());
+ compactionKVMax);
result = SegmentFactory.instance().createImmutableSegmentByMerge(
compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator,
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
index 6a30eac..8f481e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.client.Scan;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -50,11 +49,16 @@ public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator
List<ImmutableSegment> segments,
CellComparator comparator, int compactionKVMax, Store store
) throws IOException {
- super(segments,comparator,compactionKVMax,store);
+ super(compactionKVMax);
+ List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
+ // create the list of scanners to traverse over all the data
+ // no dirty reads here as these are immutable segments
+ int order = segments.size();
+ AbstractMemStore.addToScanners(segments, Integer.MAX_VALUE, order, scanners);
// build the scanner based on Query Matcher
// reinitialize the compacting scanner for each instance of iterator
- compactingScanner = createScanner(store, scanner);
+ compactingScanner = createScanner(store, scanners);
hasMore = compactingScanner.next(kvs, scannerContext);
@@ -93,7 +97,6 @@ public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator
public void close() {
compactingScanner.close();
compactingScanner = null;
- scanner = null;
}
@Override
@@ -106,13 +109,13 @@ public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator
*
* @return the scanner
*/
- private StoreScanner createScanner(Store store, KeyValueScanner scanner)
+ private StoreScanner createScanner(Store store, List<KeyValueScanner> scanners)
throws IOException {
Scan scan = new Scan();
scan.setMaxVersions(); //Get all available versions
StoreScanner internalScanner =
- new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(scanner),
+ new StoreScanner(store, store.getScanInfo(), scan, scanners,
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
@@ -146,4 +149,4 @@ public class MemStoreCompactorSegmentsIterator extends MemStoreSegmentsIterator
}
return hasMore;
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
index 625fc76..3bb814b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
/**
@@ -33,36 +34,67 @@ import java.util.List;
@InterfaceAudience.Private
public class MemStoreMergerSegmentsIterator extends MemStoreSegmentsIterator {
+ // heap of scanners, lazily initialized
+ private KeyValueHeap heap = null;
+ // remember the initial version of the scanners list
+ List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
+
+ private boolean closed = false;
+
// C-tor
public MemStoreMergerSegmentsIterator(List<ImmutableSegment> segments, CellComparator comparator,
- int compactionKVMax, Store store
- ) throws IOException {
- super(segments,comparator,compactionKVMax,store);
+ int compactionKVMax) throws IOException {
+ super(compactionKVMax);
+ // create the list of scanners to traverse over all the data
+ // no dirty reads here as these are immutable segments
+ int order = segments.size();
+ AbstractMemStore.addToScanners(segments, Integer.MAX_VALUE, order, scanners);
+ heap = new KeyValueHeap(scanners, comparator);
}
@Override
public boolean hasNext() {
- return (scanner.peek()!=null);
+ if (closed) {
+ return false;
+ }
+ if (this.heap != null) {
+ return (this.heap.peek() != null);
+ }
+ // Doing it this way in case some test case tries to peek directly
+ return false;
}
@Override
public Cell next() {
- Cell result = null;
try { // try to get next
- result = scanner.next();
+ if (!closed && heap != null) {
+ return heap.next();
+ }
} catch (IOException ie) {
throw new IllegalStateException(ie);
}
- return result;
+ return null;
}
public void close() {
- scanner.close();
- scanner = null;
+ if (closed) {
+ return;
+ }
+ // Ensuring that all the segment scanners are closed
+ if (heap != null) {
+ heap.close();
+ // It is safe to close here as no new calls will be made to this scanner.
+ heap = null;
+ } else {
+ for (KeyValueScanner scanner : scanners) {
+ scanner.close();
+ }
+ }
+ closed = true;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
-}
\ No newline at end of file
+}
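The replacement structure is easy to state in isolation: the iterator now owns a KeyValueHeap directly and drains it in comparator order. A minimal sketch of that merge loop, under the assumption that the scanners and comparator are supplied by the caller:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.regionserver.KeyValueHeap;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;

static void drain(List<KeyValueScanner> scanners, CellComparator comparator)
    throws IOException {
  // The heap pops the globally smallest cell across all segment scanners.
  KeyValueHeap heap = new KeyValueHeap(scanners, comparator);
  try {
    for (Cell c = heap.next(); c != null; c = heap.next()) {
      // cells arrive sorted across all segments
    }
  } finally {
    heap.close();
  }
}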
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
deleted file mode 100644
index 2ccdf68..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.htrace.Trace;
-
-/**
- * This is the scanner for any MemStore implementation, derived from MemStore.
- * The MemStoreScanner combines KeyValueScanner from different Segments and
- * uses the key-value heap and the reversed key-value heap for the aggregated key-values set.
- * It is assumed that only traversing forward or backward is used (without zigzagging in between)
- */
-@InterfaceAudience.Private
-public class MemStoreScanner extends NonLazyKeyValueScanner {
-
- // heap of scanners, lazily initialized
- private KeyValueHeap heap;
-
- // indicates if the scanner is created for inmemoryCompaction
- private boolean inmemoryCompaction;
-
- // remember the initial version of the scanners list
- List<KeyValueScanner> scanners;
-
- private final CellComparator comparator;
-
- private boolean closed;
-
- /**
- * Creates either a forward KeyValue heap or Reverse KeyValue heap based on the type of scan
- * and the heap is lazily initialized
- * @param comparator Cell Comparator
- * @param scanners List of scanners, from which the heap will be built
- * @param inmemoryCompaction true if used for inmemoryCompaction.
- * In this case, creates a forward heap always.
- */
- public MemStoreScanner(CellComparator comparator, List<KeyValueScanner> scanners,
- boolean inmemoryCompaction) throws IOException {
- super();
- this.comparator = comparator;
- this.scanners = scanners;
- if (Trace.isTracing() && Trace.currentSpan() != null) {
- Trace.currentSpan().addTimelineAnnotation("Creating MemStoreScanner");
- }
- this.inmemoryCompaction = inmemoryCompaction;
- if (inmemoryCompaction) {
- // init the forward scanner in case of inmemoryCompaction
- initForwardKVHeapIfNeeded(comparator, scanners);
- }
- }
-
- /**
- * Creates either a forward KeyValue heap or Reverse KeyValue heap based on the type of scan
- * and the heap is lazily initialized
- * @param comparator Cell Comparator
- * @param scanners List of scanners, from which the heap will be built
- */
- public MemStoreScanner(CellComparator comparator, List<KeyValueScanner> scanners)
- throws IOException {
- this(comparator, scanners, false);
- }
-
- private void initForwardKVHeapIfNeeded(CellComparator comparator, List<KeyValueScanner> scanners)
- throws IOException {
- if (heap == null) {
- // lazy init
- // In a normal scan, the StoreScanner does a seek or reseek on all of its
- // component scanners before the KVHeap is created. So when any of those
- // calls reaches this scanner, we init the heap here with a normal forward KVHeap.
- this.heap = new KeyValueHeap(scanners, comparator);
- }
- }
-
- private boolean initReverseKVHeapIfNeeded(Cell seekKey, CellComparator comparator,
- List<KeyValueScanner> scanners) throws IOException {
- boolean res = false;
- if (heap == null) {
- // lazy init
- // In a normal reverse scan, the ReversedStoreScanner does a seekToLastRow or
- // backwardSeek on all of its component scanners before the ReversedKeyValueHeap
- // is created. So when any of those calls reaches this scanner, we init the heap
- // here with a ReversedKVHeap.
- if (CellUtil.matchingRow(seekKey, HConstants.EMPTY_START_ROW)) {
- for (KeyValueScanner scanner : scanners) {
- res |= scanner.seekToLastRow();
- }
- } else {
- for (KeyValueScanner scanner : scanners) {
- res |= scanner.backwardSeek(seekKey);
- }
- }
- this.heap = new ReversedKeyValueHeap(scanners, comparator);
- }
- return res;
- }
-
- /**
- * Returns the cell from the top-most scanner without advancing the iterator.
- * Backward traversal is assumed only if it was specified explicitly.
- */
- @Override
- public Cell peek() {
- if (closed) {
- return null;
- }
- if (this.heap != null) {
- return this.heap.peek();
- }
- // Done this way to avoid an NPE in case a test case peeks directly.
- return null;
- }
-
- /**
- * Gets the next cell from the top-most scanner. Assumed forward scanning.
- */
- @Override
- public Cell next() throws IOException {
- if (closed) {
- return null;
- }
- if(this.heap != null) {
- // loop over till the next suitable value
- // take next value from the heap
- for (Cell currentCell = heap.next();
- currentCell != null;
- currentCell = heap.next()) {
- // all the logic of presenting cells is inside the internal KeyValueScanners
- // located inside the heap
- return currentCell;
- }
- }
- return null;
- }
-
- /**
- * Set the scanner at the seek key. Assumed forward scanning.
- * Must be called only once: there is no thread safety between the scanner
- * and the memStore.
- *
- * @param cell seek value
- * @return false if the key is null or if there is no data
- */
- @Override
- public boolean seek(Cell cell) throws IOException {
- if (closed) {
- return false;
- }
- initForwardKVHeapIfNeeded(comparator, scanners);
-
- if (cell == null) {
- close();
- return false;
- }
-
- return heap.seek(cell);
- }
-
- /**
- * Move forward on the sub-lists set previously by seek. Assumed forward scanning.
- *
- * @param cell seek value (should be non-null)
- * @return true if there is at least one KV to read, false otherwise
- */
- @Override
- public boolean reseek(Cell cell) throws IOException {
- /*
- * See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation.
- * This code is executed concurrently with flush and puts, without locks.
- * Two points must be known when working on this code:
- * 1) It's not possible to use the 'kvTail' and 'snapshot'
- * variables, as they are modified during a flush.
- * 2) The ideal implementation for performance would use the sub skip list
- * implicitly pointed by the iterators 'kvsetIt' and
- * 'snapshotIt'. Unfortunately the Java API does not offer a method to
- * get it. So we remember the last keys we iterated to and restore
- * the reseeked set to at least that point.
- *
- * TODO: The above comment was copied from the original MemStoreScanner.
- */
- if (closed) {
- return false;
- }
- initForwardKVHeapIfNeeded(comparator, scanners);
- return heap.reseek(cell);
- }
-
- /**
- * MemStoreScanner returns Long.MAX_VALUE because it will always have the latest data among all
- * scanners.
- * @see KeyValueScanner#getScannerOrder()
- */
- @Override
- public long getScannerOrder() {
- return Long.MAX_VALUE;
- }
-
- @Override
- public void close() {
- if (closed) {
- return;
- }
- // Ensuring that all the segment scanners are closed
- if (heap != null) {
- heap.close();
- // It is safe to do close as no new calls will be made to this scanner.
- heap = null;
- } else {
- for (KeyValueScanner scanner : scanners) {
- scanner.close();
- }
- }
- closed = true;
- }
-
- /**
- * Set the scanner at the seek key. Assumed backward scanning.
- *
- * @param cell seek value
- * @return false if the key is null or if there is no data
- */
- @Override
- public boolean backwardSeek(Cell cell) throws IOException {
- // The first time when this happens it sets the scanners to the seek key
- // passed by the incoming scan's start row
- if (closed) {
- return false;
- }
- initReverseKVHeapIfNeeded(cell, comparator, scanners);
- return heap.backwardSeek(cell);
- }
-
- /**
- * Assumed backward scanning.
- *
- * @param cell seek value
- * @return false if the key is null or if there is no data
- */
- @Override
- public boolean seekToPreviousRow(Cell cell) throws IOException {
- if (closed) {
- return false;
- }
- initReverseKVHeapIfNeeded(cell, comparator, scanners);
- if (heap.peek() == null) {
- restartBackwardHeap(cell);
- }
- return heap.seekToPreviousRow(cell);
- }
-
- @Override
- public boolean seekToLastRow() throws IOException {
- if (closed) {
- return false;
- }
- return initReverseKVHeapIfNeeded(KeyValue.LOWESTKEY, comparator, scanners);
- }
-
- /**
- * Check if this memstore may contain the required keys
- * @return False if the key definitely does not exist in this Memstore
- */
- @Override
- public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
- // TODO : Check if this can be removed.
- if (inmemoryCompaction) {
- return true;
- }
-
- for (KeyValueScanner sc : scanners) {
- if (sc.shouldUseScanner(scan, store, oldestUnexpiredTS)) {
- return true;
- }
- }
- return false;
- }
-
- // debug method
- @Override
- public String toString() {
- StringBuffer buf = new StringBuffer();
- int i = 1;
- for (KeyValueScanner scanner : scanners) {
- buf.append("scanner (" + i + ") " + scanner.toString() + " ||| ");
- i++;
- }
- return buf.toString();
- }
- /****************** Private methods ******************/
- /**
- * Rebuild the exhausted backward heap by rerunning seekToPreviousRow()
- * on each scanner.
- * @return false if given Cell does not exist in any scanner
- */
- private boolean restartBackwardHeap(Cell cell) throws IOException {
- boolean res = false;
- for (KeyValueScanner scan : scanners) {
- res |= scan.seekToPreviousRow(cell);
- }
- this.heap =
- new ReversedKeyValueHeap(scanners, comparator);
- return res;
- }
-}
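For orientation, here is a minimal sketch of how the removed MemStoreScanner was driven for a forward scan. It mirrors the scanner setup being dropped from MemStoreSegmentsIterator below; 'segments' and 'comparator' are assumed inputs from the owning MemStore, not part of this diff.

    // Hedged sketch; 'segments' and 'comparator' are assumed inputs.
    List<KeyValueScanner> scanners = new ArrayList<>();
    for (ImmutableSegment segment : segments) {
      scanners.add(segment.getScanner(Integer.MAX_VALUE));
    }
    KeyValueScanner memStoreScanner = new MemStoreScanner(comparator, scanners);
    // seek() lazily builds the forward KeyValueHeap; next() then drains it in sorted order.
    memStoreScanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
    for (Cell cell = memStoreScanner.next(); cell != null; cell = memStoreScanner.next()) {
      // consume cell
    }
    memStoreScanner.close();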
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
index 7728534..048f746 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java
@@ -20,11 +20,10 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import java.io.IOException;
-import java.util.*;
+import java.util.Iterator;
/**
* The MemStoreSegmentsIterator is designed to perform one iteration over a given list of segments
@@ -35,29 +34,11 @@ import java.util.*;
@InterfaceAudience.Private
public abstract class MemStoreSegmentsIterator implements Iterator<Cell> {
- // scanner for full or partial pipeline (heap of segment scanners)
- // we need to keep those scanners in order to close them at the end
- protected KeyValueScanner scanner;
-
protected final ScannerContext scannerContext;
-
// C-tor
- public MemStoreSegmentsIterator(List<ImmutableSegment> segments, CellComparator comparator,
- int compactionKVMax, Store store) throws IOException {
-
+ public MemStoreSegmentsIterator(int compactionKVMax) throws IOException {
this.scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
-
- // list of Scanners of segments in the pipeline, when compaction starts
- List<KeyValueScanner> scanners = new ArrayList<>();
-
- // create the list of scanners to traverse over all the data
- // no dirty reads here as these are immutable segments
- for (ImmutableSegment segment : segments) {
- scanners.add(segment.getScanner(Integer.MAX_VALUE));
- }
-
- scanner = new MemStoreScanner(comparator, scanners, true);
}
public abstract void close();
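Since the base class no longer builds scanners, each concrete iterator now creates and closes its own per-segment scanners. A hedged sketch of the expected subclass shape follows; 'ExampleSegmentsIterator' is hypothetical, and the constructor signature mirrors MemStoreMergerSegmentsIterator as exercised in the tests further below.

    // Sketch only; iteration logic omitted. The class name is hypothetical.
    public class ExampleSegmentsIterator extends MemStoreSegmentsIterator {
      private final List<KeyValueScanner> scanners = new ArrayList<>();

      public ExampleSegmentsIterator(List<ImmutableSegment> segments, CellComparator comparator,
          int compactionKVMax) throws IOException {
        super(compactionKVMax);
        for (ImmutableSegment segment : segments) {
          scanners.addAll(segment.getScanners(Long.MAX_VALUE, Long.MAX_VALUE));
        }
      }

      @Override
      public void close() {
        for (KeyValueScanner scanner : scanners) {
          scanner.close();
        }
      }
      // hasNext()/next() over the scanners omitted for brevity
    }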
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
index 3858b1c..dd7f957 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import java.util.List;
/**
* Holds details of the snapshot taken on a MemStore. Details include the snapshot's identifier,
* count of cells in it and total memory size occupied by all the cells, timestamp information of
@@ -31,7 +32,7 @@ public class MemStoreSnapshot {
private final long dataSize;
private final long heapSize;
private final TimeRangeTracker timeRangeTracker;
- private final KeyValueScanner scanner;
+ private final List<KeyValueScanner> scanners;
private final boolean tagsPresent;
public MemStoreSnapshot(long id, ImmutableSegment snapshot) {
@@ -40,7 +41,7 @@ public class MemStoreSnapshot {
this.dataSize = snapshot.keySize();
this.heapSize = snapshot.heapSize();
this.timeRangeTracker = snapshot.getTimeRangeTracker();
- this.scanner = snapshot.getSnapshotScanner();
+ this.scanners = snapshot.getScanners(Long.MAX_VALUE, Long.MAX_VALUE);
this.tagsPresent = snapshot.isTagsPresent();
}
@@ -66,21 +67,21 @@ public class MemStoreSnapshot {
}
public long getHeapSize() {
- return this.heapSize;
+ return heapSize;
}
/**
* @return {@link TimeRangeTracker} for all the Cells in the snapshot.
*/
public TimeRangeTracker getTimeRangeTracker() {
- return this.timeRangeTracker;
+ return timeRangeTracker;
}
/**
* @return {@link KeyValueScanner} for iterating over the snapshot
*/
- public KeyValueScanner getScanner() {
- return this.scanner;
+ public List<KeyValueScanner> getScanners() {
+ return scanners;
}
/**
@@ -89,4 +90,4 @@ public class MemStoreSnapshot {
public boolean isTagsPresent() {
return this.tagsPresent;
}
-}
\ No newline at end of file
+}
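Callers that previously closed the single snapshot scanner must now close every scanner in the returned list before clearing the snapshot. The test updates below all follow this pattern:

    // Migration pattern, taken directly from the test changes in this commit.
    MemStoreSnapshot snapshot = memstore.snapshot();
    // ... flush the snapshot ...
    for (KeyValueScanner scanner : snapshot.getScanners()) {
      scanner.close();
    }
    memstore.clearSnapshot(snapshot.getId());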
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 925e349..64823b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -629,17 +629,16 @@ public class RegionCoprocessorHost
/**
* See
- * {@link RegionObserver#preFlushScannerOpen(ObserverContext,
- * Store, KeyValueScanner, InternalScanner, long)}
+ * {@link RegionObserver#preFlushScannerOpen(ObserverContext, Store, List, InternalScanner, long)}
*/
public InternalScanner preFlushScannerOpen(final Store store,
- final KeyValueScanner memstoreScanner, final long readPoint) throws IOException {
+ final List<KeyValueScanner> scanners, final long readPoint) throws IOException {
return execOperationWithResult(null,
coprocessors.isEmpty() ? null : new RegionOperationWithResult<InternalScanner>() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
- setResult(oserver.preFlushScannerOpen(ctx, store, memstoreScanner, getResult(), readPoint));
+ setResult(oserver.preFlushScannerOpen(ctx, store, scanners, getResult(), readPoint));
}
});
}
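Coprocessors overriding this hook must move to the List-based signature. A minimal no-op override under the new contract (returning null lets default flush scanner creation proceed, as SimpleRegionObserver below does):

    @Override
    public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
      return null; // null means no custom scanner; the default flush scanner is created
    }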
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index 452cca8..6f431c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import java.util.ArrayList;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
@@ -102,7 +102,7 @@ public abstract class Segment {
* Creates the scanner for the given read point
* @return a scanner for the given read point
*/
- public KeyValueScanner getScanner(long readPoint) {
+ protected KeyValueScanner getScanner(long readPoint) {
return new SegmentScanner(this, readPoint);
}
@@ -115,9 +115,7 @@ public abstract class Segment {
}
public List<KeyValueScanner> getScanners(long readPoint, long order) {
- List<KeyValueScanner> scanners = new ArrayList<>(1);
- scanners.add(getScanner(readPoint, order));
- return scanners;
+ return Collections.singletonList(new SegmentScanner(this, readPoint, order));
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
index 5e2e36f..2727360 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.Iterator;
import java.util.SortedSet;
+import org.apache.commons.lang.NotImplementedException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -280,16 +281,11 @@ public class SegmentScanner implements KeyValueScanner {
public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
return getSegment().shouldSeek(scan,oldestUnexpiredTS);
}
- /**
- * This scanner is working solely on the in-memory MemStore therefore this
- * interface is not relevant.
- */
+
@Override
public boolean requestSeek(Cell c, boolean forward, boolean useBloom)
throws IOException {
-
- throw new IllegalStateException(
- "requestSeek cannot be called on MutableCellSetSegmentScanner");
+ return NonLazyKeyValueScanner.doRealSeek(this, c, forward);
}
/**
@@ -309,8 +305,7 @@ public class SegmentScanner implements KeyValueScanner {
*/
@Override
public void enforceSeek() throws IOException {
- throw new IllegalStateException(
- "enforceSeek cannot be called on MutableCellSetSegmentScanner");
+ throw new NotImplementedException("enforceSeek cannot be called on a SegmentScanner");
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotScanner.java
deleted file mode 100644
index 6300e00..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotScanner.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Scan;
-
-/**
- * Scans the snapshot. Acts as a simple scanner that just iterates over all the cells
- * in the segment
- */
-@InterfaceAudience.Private
-public class SnapshotScanner extends SegmentScanner {
-
- public SnapshotScanner(Segment immutableSegment) {
- // Snapshot scanner does not need readpoint. It should read all the cells in the
- // segment
- super(immutableSegment, Long.MAX_VALUE);
- }
-
- @Override
- public Cell peek() { // sanity check; current should always be valid
- if (closed) {
- return null;
- }
- return current;
- }
-
- @Override
- public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
- return true;
- }
-
- @Override
- public boolean backwardSeek(Cell key) throws IOException {
- throw new NotImplementedException(
- "backwardSeek must not be called on a " + "non-reversed scanner");
- }
-
- @Override
- public boolean seekToPreviousRow(Cell key) throws IOException {
- throw new NotImplementedException(
- "seekToPreviousRow must not be called on a " + "non-reversed scanner");
- }
-
- @Override
- public boolean seekToLastRow() throws IOException {
- throw new NotImplementedException(
- "seekToLastRow must not be called on a " + "non-reversed scanner");
- }
-
- @Override
- protected Iterator<Cell> getIterator(Cell cell) {
- return segment.iterator();
- }
-
- @Override
- protected void updateCurrent() {
- if (iter.hasNext()) {
- current = iter.next();
- } else {
- current = null;
- }
- }
-
- @Override
- public boolean seek(Cell seekCell) {
- // restart iterator
- iter = getIterator(seekCell);
- return reseek(seekCell);
- }
-
- @Override
- public boolean reseek(Cell seekCell) {
- while (iter.hasNext()) {
- Cell next = iter.next();
- int ret = segment.getComparator().compare(next, seekCell);
- if (ret >= 0) {
- current = next;
- return true;
- }
- }
- return false;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
index 23fae6a..298f3d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java
@@ -74,22 +74,22 @@ abstract class StoreFlusher {
/**
* Creates the scanner for flushing snapshot. Also calls coprocessors.
- * @param snapshotScanner
+ * @param snapshotScanners
* @param smallestReadPoint
* @return The scanner; null if coprocessor is canceling the flush.
*/
- protected InternalScanner createScanner(KeyValueScanner snapshotScanner,
+ protected InternalScanner createScanner(List<KeyValueScanner> snapshotScanners,
long smallestReadPoint) throws IOException {
InternalScanner scanner = null;
if (store.getCoprocessorHost() != null) {
- scanner = store.getCoprocessorHost().preFlushScannerOpen(store, snapshotScanner,
+ scanner = store.getCoprocessorHost().preFlushScannerOpen(store, snapshotScanners,
smallestReadPoint);
}
if (scanner == null) {
Scan scan = new Scan();
scan.setMaxVersions(store.getScanInfo().getMaxVersions());
scanner = new StoreScanner(store, store.getScanInfo(), scan,
- Collections.singletonList(snapshotScanner), ScanType.COMPACT_RETAIN_DELETES,
+ snapshotScanners, ScanType.COMPACT_RETAIN_DELETES,
smallestReadPoint, HConstants.OLDEST_TIMESTAMP);
}
assert scanner != null;
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 85bae9d..3f9688d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -62,7 +62,7 @@ public class StripeStoreFlusher extends StoreFlusher {
if (cellsCount == 0) return result; // don't flush if there are no entries
long smallestReadPoint = store.getSmallestReadPoint();
- InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
+ InternalScanner scanner = createScanner(snapshot.getScanners(), smallestReadPoint);
if (scanner == null) {
return result; // NULL scanner returned from coprocessor hooks means skip normal processing
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
index ec4601c..24b5051 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
@@ -187,7 +187,7 @@ public class SimpleRegionObserver implements RegionObserver {
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
ctPreFlushScannerOpen.incrementAndGet();
return null;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index ce36af8..80d0e3a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.Region;
@@ -122,11 +120,11 @@ public class TestRegionObserverScannerOpenHook {
public static class NoDataFromFlush implements RegionObserver {
@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
- Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
Scan scan = new Scan();
scan.setFilter(new NoDataFilter());
return new StoreScanner(store, store.getScanInfo(), scan,
- Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
+ scanners, ScanType.COMPACT_RETAIN_DELETES,
store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
index 2d096fa..c47ed68 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
@@ -43,13 +43,13 @@ public class NoOpScanPolicyObserver implements RegionObserver {
*/
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
ScanInfo oldSI = store.getScanInfo();
ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getFamily(), oldSI.getTtl(),
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(oldSI.getMaxVersions());
- return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
+ return new StoreScanner(store, scanInfo, scan, scanners,
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index 09ddd6f..a888c45 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -384,8 +384,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
memstore.add(new KeyValue(row, fam, qf4, val), null);
memstore.add(new KeyValue(row, fam, qf5, val), null);
assertEquals(2, memstore.getActive().getCellsCount());
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
int chunkCount = chunkPool.getPoolSize();
@@ -426,8 +428,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
List<KeyValueScanner> scanners = memstore.getScanners(0);
// The chunks shouldn't be put back into the pool yet, since some scanners are
// still open on their data
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() == 0);
@@ -455,8 +459,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
}
// Since no opening scanner, the chunks of snapshot should be put back to
// pool
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() > 0);
}
@@ -524,8 +530,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
// Creating another snapshot
MemStoreSnapshot snapshot = memstore.snapshot();
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
snapshot = memstore.snapshot();
@@ -540,8 +548,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
}
// Since no opening scanner, the chunks of snapshot should be put back to
// pool
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() > 0);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
index a9f8a97..5a48455 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
@@ -316,13 +316,17 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
}
List<KeyValueScanner> scanners = memstore.getScanners(Long.MAX_VALUE);
// seek
- scanners.get(0).seek(KeyValue.LOWESTKEY);
int count = 0;
- while (scanners.get(0).next() != null) {
- count++;
+ for(int i = 0; i < scanners.size(); i++) {
+ scanners.get(i).seek(KeyValue.LOWESTKEY);
+ while (scanners.get(i).next() != null) {
+ count++;
+ }
}
assertEquals("the count should be ", count, 150);
- scanners.get(0).close();
+ for(int i = 0; i < scanners.size(); i++) {
+ scanners.get(i).close();
+ }
}
@Test
@@ -337,7 +341,7 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
// Just doing the cnt operation here
MemStoreSegmentsIterator itr = new MemStoreMergerSegmentsIterator(
((CompactingMemStore) memstore).getImmutableSegments().getStoreSegments(),
- CellComparator.COMPARATOR, 10, ((CompactingMemStore) memstore).getStore());
+ CellComparator.COMPARATOR, 10);
int cnt = 0;
try {
while (itr.next() != null) {
@@ -398,8 +402,10 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
List<KeyValueScanner> scanners = memstore.getScanners(0);
// The chunks shouldn't be put back into the pool yet, since some scanners are
// still open on their data
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() == 0);
@@ -427,8 +433,10 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
}
// Since no opening scanner, the chunks of snapshot should be put back to
// pool
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() > 0);
}
@@ -458,8 +466,10 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
memstore.add(new KeyValue(row, fam, qf4, val), null);
memstore.add(new KeyValue(row, fam, qf5, val), null);
assertEquals(2, memstore.getActive().getCellsCount());
- // close the scanner
- snapshot.getScanner().close();
+ // close the scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
int chunkCount = chunkPool.getPoolSize();
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index e76da5a..7434eb1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -264,12 +264,20 @@ public class TestDefaultMemStore {
protected void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2)
throws IOException {
List<KeyValueScanner> memstorescanners = this.memstore.getScanners(mvcc.getReadPoint());
- assertEquals(1, memstorescanners.size());
- final KeyValueScanner scanner = memstorescanners.get(0);
- scanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
- assertEquals(kv1, scanner.next());
- assertEquals(kv2, scanner.next());
- assertNull(scanner.next());
+ assertEquals(2, memstorescanners.size());
+ final KeyValueScanner scanner0 = memstorescanners.get(0);
+ final KeyValueScanner scanner1 = memstorescanners.get(1);
+ scanner0.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
+ scanner1.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
+ Cell n0 = scanner0.next();
+ Cell n1 = scanner1.next();
+ assertTrue(kv1.equals(n0) || kv1.equals(n1));
+ assertTrue(kv2.equals(n0)
+ || kv2.equals(n1)
+ || kv2.equals(scanner0.next())
+ || kv2.equals(scanner1.next()));
+ assertNull(scanner0.next());
+ assertNull(scanner1.next());
}
protected void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected)
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
index 42aad5c..37a7664 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
@@ -138,7 +138,9 @@ public class TestMemStoreChunkPool {
memstore.add(new KeyValue(row, fam, qf5, val), null);
assertEquals(2, memstore.getActive().getCellsCount());
// close the scanner - this is how the snapshot will be used
- snapshot.getScanner().close();
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
int chunkCount = chunkPool.getPoolSize();
@@ -182,7 +184,9 @@ public class TestMemStoreChunkPool {
// The chunks shouldn't be put back into the pool yet, since some scanners are
// still open on their data
// close the snapshot scanner
- snapshot.getScanner().close();
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() == 0);
@@ -209,8 +213,10 @@ public class TestMemStoreChunkPool {
}
// Since no opening scanner, the chunks of snapshot should be put back to
// pool
- // close the snapshot scanner
- snapshot.getScanner().close();
+ // close the snapshot scanners
+ for(KeyValueScanner scanner : snapshot.getScanners()) {
+ scanner.close();
+ }
memstore.clearSnapshot(snapshot.getId());
assertTrue(chunkPool.getPoolSize() > 0);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
index 69965ba..ecb808e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
@@ -120,7 +120,7 @@ public class TestReversibleScanners {
LOG.info("Setting read point to " + readPoint);
scanners = StoreFileScanner.getScannersForStoreFiles(
Collections.singletonList(sf), false, true, false, false, readPoint);
- seekTestOfReversibleKeyValueScannerWithMVCC(scanners.get(0), readPoint);
+ seekTestOfReversibleKeyValueScannerWithMVCC(scanners, readPoint);
}
}
@@ -135,7 +135,7 @@ public class TestReversibleScanners {
for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
LOG.info("Setting read point to " + readPoint);
scanners = memstore.getScanners(readPoint);
- seekTestOfReversibleKeyValueScannerWithMVCC(scanners.get(0), readPoint);
+ seekTestOfReversibleKeyValueScannerWithMVCC(scanners, readPoint);
}
}
@@ -560,38 +560,68 @@ public class TestReversibleScanners {
}
private void seekTestOfReversibleKeyValueScannerWithMVCC(
- KeyValueScanner scanner, int readPoint) throws IOException {
- /**
- * Test with MVCC
- */
- // Test seek to last row
- KeyValue expectedKey = getNextReadableKeyValueWithBackwardScan(
- ROWSIZE - 1, 0, readPoint);
- assertEquals(expectedKey != null, scanner.seekToLastRow());
- assertEquals(expectedKey, scanner.peek());
+ List<? extends KeyValueScanner> scanners, int readPoint) throws IOException {
+ /**
+ * Test with MVCC
+ */
+ // Test seek to last row
+ KeyValue expectedKey = getNextReadableKeyValueWithBackwardScan(
+ ROWSIZE - 1, 0, readPoint);
+ boolean res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= scanner.seekToLastRow();
+ }
+ assertEquals(expectedKey != null, res);
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= (expectedKey.equals(scanner.peek()));
+ }
+ assertTrue(res);
// Test backward seek in two cases
// Case1: seek in the same row in backwardSeek
expectedKey = getNextReadableKeyValueWithBackwardScan(ROWSIZE - 2,
QUALSIZE - 2, readPoint);
- assertEquals(expectedKey != null, scanner.backwardSeek(expectedKey));
- assertEquals(expectedKey, scanner.peek());
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= scanner.backwardSeek(expectedKey);
+ }
+ assertEquals(expectedKey != null, res);
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= (expectedKey.equals(scanner.peek()));
+ }
+ assertTrue(res);
// Case2: seek to the previous row in backwardSeek
int seekRowNum = ROWSIZE - 3;
KeyValue seekKey = KeyValueUtil.createLastOnRow(ROWS[seekRowNum]);
expectedKey = getNextReadableKeyValueWithBackwardScan(seekRowNum - 1, 0,
readPoint);
- assertEquals(expectedKey != null, scanner.backwardSeek(seekKey));
- assertEquals(expectedKey, scanner.peek());
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= scanner.backwardSeek(seekKey);
+ }
+ assertEquals(expectedKey != null, res);
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= (expectedKey.equals(scanner.peek()));
+ }
+ assertTrue(res);
// Test seek to previous row
seekRowNum = ROWSIZE - 4;
expectedKey = getNextReadableKeyValueWithBackwardScan(seekRowNum - 1, 0,
readPoint);
- assertEquals(expectedKey != null, scanner.seekToPreviousRow(KeyValueUtil
- .createFirstOnRow(ROWS[seekRowNum])));
- assertEquals(expectedKey, scanner.peek());
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= scanner.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[seekRowNum]));
+ }
+ assertEquals(expectedKey != null, res);
+ res = false;
+ for (KeyValueScanner scanner : scanners) {
+ res |= (expectedKey.equals(scanner.peek()));
+ }
+ assertTrue(res);
}
private KeyValue getNextReadableKeyValueWithBackwardScan(int startRowNum,
http://git-wip-us.apache.org/repos/asf/hbase/blob/8f4ae0a0/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
index caf8de9..27e93a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -238,7 +237,7 @@ public class TestCoprocessorScanPolicy {
@Override
public InternalScanner preFlushScannerOpen(
final ObserverContext<RegionCoprocessorEnvironment> c,
- Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+ Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
Long newTtl = ttls.get(store.getTableName());
if (newTtl != null) {
System.out.println("PreFlush:" + newTtl);
@@ -253,7 +252,7 @@ public class TestCoprocessorScanPolicy {
oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
Scan scan = new Scan();
scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
- return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
+ return new StoreScanner(store, scanInfo, scan, scanners,
ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
HConstants.OLDEST_TIMESTAMP);
}
[06/23] hbase git commit: HBASE-17805 We should remove
BoundedByteBufferPool because it is replaced by ByteBufferPool
Posted by sy...@apache.org.
HBASE-17805 We should remove BoundedByteBufferPool because it is replaced by ByteBufferPool
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bb0624b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bb0624b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bb0624b
Branch: refs/heads/hbase-12439
Commit: 7bb0624bab68d7dd136d0cd54a8f0c74790aca31
Parents: 9c8f02e
Author: CHIA-PING TSAI <ch...@gmail.com>
Authored: Mon Mar 20 09:11:53 2017 +0800
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Tue Mar 21 09:38:02 2017 +0800
----------------------------------------------------------------------
.../hadoop/hbase/io/BoundedByteBufferPool.java | 194 -------------------
.../hbase/io/TestBoundedByteBufferPool.java | 167 ----------------
2 files changed, 361 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/7bb0624b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
deleted file mode 100644
index 7bce0e5..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io;
-
-import java.nio.ByteBuffer;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Like Hadoop's ByteBufferPool, except that you do not specify a desired size when getting a
- * ByteBuffer. This pool keeps an upper bound on the count of ByteBuffers in the pool and on
- * the maximum size of ByteBuffer that it will retain (hence the pool is 'bounded', as opposed
- * to, say, Hadoop's ElasticByteBufferPool).
- * If a ByteBuffer is bigger than the configured threshold, we will just let the ByteBuffer go
- * rather than add it to the pool. If the pool already holds the configured maximum number of
- * instances, we will not add the passed ByteBuffer; we will just drop it (and log that we are
- * at capacity).
- *
- * <p>The intended use case is a reservoir of ByteBuffers that an RPC can reuse; buffers tend
- * to settle around a particular 'run' size over time, give or take a few extremes. Set TRACE
- * level on this class for a couple of seconds to get reporting on how it is running when
- * deployed.
- *
- * <p>This pool returns off heap ByteBuffers.
- *
- * <p>This class is thread safe.
- */
-@InterfaceAudience.Private
-public class BoundedByteBufferPool {
- private static final Log LOG = LogFactory.getLog(BoundedByteBufferPool.class);
-
- private final Queue<ByteBuffer> buffers = new ConcurrentLinkedQueue<>();
-
- @VisibleForTesting
- int getQueueSize() {
- return buffers.size();
- }
-
- private final int maxToCache;
-
- // Maximum size of a ByteBuffer to retain in pool
- private final int maxByteBufferSizeToCache;
-
- // A running average; it only rises, never recedes
- private final AtomicInteger runningAverageRef;
-
- @VisibleForTesting
- int getRunningAverage() {
- return runningAverageRef.get();
- }
-
- // Count (lower 32bit) and total capacity (upper 32bit) of pooled bytebuffers.
- // Both are non-negative. They are equal to or larger than those of the actual
- // queued buffers in any transition.
- private final AtomicLong stateRef = new AtomicLong();
-
- @VisibleForTesting
- static int toCountOfBuffers(long state) {
- return (int)state;
- }
-
- @VisibleForTesting
- static int toTotalCapacity(long state) {
- return (int)(state >>> 32);
- }
-
- @VisibleForTesting
- static long toState(int countOfBuffers, int totalCapacity) {
- return ((long)totalCapacity << 32) | countOfBuffers;
- }
-
- @VisibleForTesting
- static long subtractOneBufferFromState(long state, int capacity) {
- return state - ((long)capacity << 32) - 1;
- }
-
- // For reporting, only used in the log
- private final AtomicLong allocationsRef = new AtomicLong();
-
- /**
- * @param maxByteBufferSizeToCache
- * @param initialByteBufferSize
- * @param maxToCache
- */
- public BoundedByteBufferPool(final int maxByteBufferSizeToCache, final int initialByteBufferSize,
- final int maxToCache) {
- this.maxByteBufferSizeToCache = maxByteBufferSizeToCache;
- this.runningAverageRef = new AtomicInteger(initialByteBufferSize);
- this.maxToCache = maxToCache;
- }
-
- public ByteBuffer getBuffer() {
- ByteBuffer bb = buffers.poll();
- if (bb != null) {
- long state;
- while (true) {
- long prevState = stateRef.get();
- state = subtractOneBufferFromState(prevState, bb.capacity());
- if (stateRef.compareAndSet(prevState, state)) {
- break;
- }
- }
- // Clear sets limit == capacity. Position == 0.
- bb.clear();
-
- if (LOG.isTraceEnabled()) {
- int countOfBuffers = toCountOfBuffers(state);
- int totalCapacity = toTotalCapacity(state);
- LOG.trace("totalCapacity=" + totalCapacity + ", count=" + countOfBuffers);
- }
- return bb;
- }
-
- int runningAverage = runningAverageRef.get();
- bb = ByteBuffer.allocateDirect(runningAverage);
-
- if (LOG.isTraceEnabled()) {
- long allocations = allocationsRef.incrementAndGet();
- LOG.trace("runningAverage=" + runningAverage + ", allocations=" + allocations);
- }
- return bb;
- }
-
- public void putBuffer(ByteBuffer bb) {
- // If buffer is larger than we want to keep around, just let it go.
- if (bb.capacity() > maxByteBufferSizeToCache) {
- return;
- }
-
- int countOfBuffers;
- int totalCapacity;
- while (true) {
- long prevState = stateRef.get();
- countOfBuffers = toCountOfBuffers(prevState);
- if (countOfBuffers >= maxToCache) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("At capacity: " + countOfBuffers);
- }
- return;
- }
- countOfBuffers++;
- assert 0 < countOfBuffers && countOfBuffers <= maxToCache;
-
- totalCapacity = toTotalCapacity(prevState) + bb.capacity();
- if (totalCapacity < 0) {
- if (LOG.isWarnEnabled()) {
- LOG.warn("Overflowed total capacity.");
- }
- return;
- }
-
- long state = toState(countOfBuffers, totalCapacity);
- if (stateRef.compareAndSet(prevState, state)) {
- break;
- }
- }
-
- // ConcurrentLinkedQueue#offer says "this method will never return false"
- buffers.offer(bb);
-
- int runningAverageUpdate = Math.min(
- totalCapacity / countOfBuffers, // size will never be 0.
- maxByteBufferSizeToCache);
- while (true) {
- int prev = runningAverageRef.get();
- if (prev >= runningAverageUpdate || // only rises, never recedes
- runningAverageRef.compareAndSet(prev, runningAverageUpdate)) {
- break;
- }
- }
- }
-}
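The removed pool packed the buffer count and the total pooled capacity into a single long so that both could be updated atomically with one compare-and-set. A worked example of the encoding, restating the static helpers above purely for illustration:

    // count lives in the lower 32 bits, total capacity in the upper 32 bits
    long state = ((long) 456 << 32) | 123;    // toState(123, 456)
    int count = (int) state;                  // 123
    int totalCapacity = (int) (state >>> 32); // 456
    // taking one buffer of capacity 7 out of the pool is a single subtraction
    long next = state - ((long) 7 << 32) - 1; // count = 122, totalCapacity = 449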
http://git-wip-us.apache.org/repos/asf/hbase/blob/7bb0624b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java
deleted file mode 100644
index eca7712..0000000
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io;
-
-import static org.apache.hadoop.hbase.io.BoundedByteBufferPool.subtractOneBufferFromState;
-import static org.apache.hadoop.hbase.io.BoundedByteBufferPool.toCountOfBuffers;
-import static org.apache.hadoop.hbase.io.BoundedByteBufferPool.toState;
-import static org.apache.hadoop.hbase.io.BoundedByteBufferPool.toTotalCapacity;
-import static org.junit.Assert.assertEquals;
-
-import java.nio.ByteBuffer;
-import java.util.concurrent.ConcurrentLinkedDeque;
-
-import org.apache.hadoop.hbase.testclassification.IOTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ IOTests.class, SmallTests.class })
-public class TestBoundedByteBufferPool {
- final int maxByteBufferSizeToCache = 10;
- final int initialByteBufferSize = 1;
- final int maxToCache = 10;
- BoundedByteBufferPool reservoir;
-
- @Before
- public void before() {
- this.reservoir =
- new BoundedByteBufferPool(maxByteBufferSizeToCache, initialByteBufferSize, maxToCache);
- }
-
- @After
- public void after() {
- this.reservoir = null;
- }
-
- @Test
- public void testEquivalence() {
- ByteBuffer bb = ByteBuffer.allocate(1);
- this.reservoir.putBuffer(bb);
- this.reservoir.putBuffer(bb);
- this.reservoir.putBuffer(bb);
- assertEquals(3, this.reservoir.getQueueSize());
- }
-
- @Test
- public void testGetPut() {
- ByteBuffer bb = this.reservoir.getBuffer();
- assertEquals(initialByteBufferSize, bb.capacity());
- assertEquals(0, this.reservoir.getQueueSize());
- this.reservoir.putBuffer(bb);
- assertEquals(1, this.reservoir.getQueueSize());
- // Now remove a buffer and don't put it back so reservoir is empty.
- this.reservoir.getBuffer();
- assertEquals(0, this.reservoir.getQueueSize());
- // Try adding in a buffer with a bigger-than-initial size and see if our runningAverage works.
- // Need to add then remove, then get a new bytebuffer so reservoir internally is doing
- // allocation
- final int newCapacity = 2;
- this.reservoir.putBuffer(ByteBuffer.allocate(newCapacity));
- assertEquals(1, reservoir.getQueueSize());
- this.reservoir.getBuffer();
- assertEquals(0, this.reservoir.getQueueSize());
- bb = this.reservoir.getBuffer();
- assertEquals(newCapacity, bb.capacity());
- // Assert that adding a too-big buffer won't happen
- assertEquals(0, this.reservoir.getQueueSize());
- this.reservoir.putBuffer(ByteBuffer.allocate(maxByteBufferSizeToCache * 2));
- assertEquals(0, this.reservoir.getQueueSize());
- // Assert we can't add more than max allowed instances.
- for (int i = 0; i < maxToCache; i++) {
- this.reservoir.putBuffer(ByteBuffer.allocate(initialByteBufferSize));
- }
- assertEquals(maxToCache, this.reservoir.getQueueSize());
- }
-
- @Test
- public void testBufferSizeGrowWithMultiThread() throws Exception {
- final ConcurrentLinkedDeque<ByteBuffer> bufferQueue = new ConcurrentLinkedDeque<>();
- int takeBufferThreadsCount = 30;
- int putBufferThreadsCount = 1;
- Thread takeBufferThreads[] = new Thread[takeBufferThreadsCount];
- for (int i = 0; i < takeBufferThreadsCount; i++) {
- takeBufferThreads[i] = new Thread(new Runnable() {
- @Override
- public void run() {
- while (true) {
- ByteBuffer buffer = reservoir.getBuffer();
- try {
- Thread.sleep(5);
- } catch (InterruptedException e) {
- break;
- }
- bufferQueue.offer(buffer);
- if (Thread.currentThread().isInterrupted()) break;
- }
- }
- });
- }
-
- Thread putBufferThread[] = new Thread[putBufferThreadsCount];
- for (int i = 0; i < putBufferThreadsCount; i++) {
- putBufferThread[i] = new Thread(new Runnable() {
- @Override
- public void run() {
- while (true) {
- ByteBuffer buffer = bufferQueue.poll();
- if (buffer != null) {
- reservoir.putBuffer(buffer);
- }
- if (Thread.currentThread().isInterrupted()) break;
- }
- }
- });
- }
-
- for (int i = 0; i < takeBufferThreadsCount; i++) {
- takeBufferThreads[i].start();
- }
- for (int i = 0; i < putBufferThreadsCount; i++) {
- putBufferThread[i].start();
- }
- Thread.sleep(2 * 1000);// Let the threads run for 2 secs
- for (int i = 0; i < takeBufferThreadsCount; i++) {
- takeBufferThreads[i].interrupt();
- takeBufferThreads[i].join();
- }
- for (int i = 0; i < putBufferThreadsCount; i++) {
- putBufferThread[i].interrupt();
- putBufferThread[i].join();
- }
- // None of the BBs we got from the pool grows while in use, so the
- // runningAverage in the pool should not change.
- assertEquals(initialByteBufferSize, this.reservoir.getRunningAverage());
- }
-
- @Test
- public void testStateConversionMethods() {
- int countOfBuffers = 123;
- int totalCapacity = 456;
-
- long state = toState(countOfBuffers, totalCapacity);
- assertEquals(countOfBuffers, toCountOfBuffers(state));
- assertEquals(totalCapacity, toTotalCapacity(state));
-
- long state2 = subtractOneBufferFromState(state, 7);
- assertEquals(countOfBuffers - 1, toCountOfBuffers(state2));
- assertEquals(totalCapacity - 7, toTotalCapacity(state2));
- }
-}
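The deleted test above exercises a set of state-packing helpers (toState, toCountOfBuffers, toTotalCapacity, subtractOneBufferFromState) that encode a buffer count and a total capacity in a single long, so both values can be updated atomically with one compare-and-set. A minimal sketch of how such packing could look, assuming the count lives in the high 32 bits and the total capacity in the low 32 bits (the actual bit layout is not shown in this diff):

    // Hypothetical sketch; the real pool's bit layout may differ.
    final class BufferPoolState {
      private BufferPoolState() {}

      // Pack (countOfBuffers, totalCapacity) into one long.
      static long toState(int countOfBuffers, int totalCapacity) {
        return ((long) countOfBuffers << 32) | (totalCapacity & 0xFFFFFFFFL);
      }

      // High 32 bits hold the buffer count.
      static int toCountOfBuffers(long state) {
        return (int) (state >>> 32);
      }

      // Low 32 bits hold the summed capacity.
      static int toTotalCapacity(long state) {
        return (int) state;
      }

      // Drop one buffer of the given capacity from the packed state.
      static long subtractOneBufferFromState(long state, int capacity) {
        return toState(toCountOfBuffers(state) - 1, toTotalCapacity(state) - capacity);
      }
    }

This sketch satisfies the assertions in testStateConversionMethods above: toState(123, 456) round-trips to (123, 456), and subtracting one 7-byte buffer yields (122, 449).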
[22/23] hbase git commit: HBASE-17831 Support small scan in thrift2
(Guangxu Cheng)
Posted by sy...@apache.org.
HBASE-17831 Support small scan in thrift2 (Guangxu Cheng)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/85fda441
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/85fda441
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/85fda441
Branch: refs/heads/hbase-12439
Commit: 85fda44179c0afba74f52944ae9bb5a38266678c
Parents: c77e213
Author: tedyu <yu...@gmail.com>
Authored: Mon Mar 27 10:19:46 2017 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Mon Mar 27 10:19:46 2017 -0700
----------------------------------------------------------------------
.../hadoop/hbase/thrift2/ThriftUtilities.java | 19 ++
.../hbase/thrift2/generated/TReadType.java | 48 ++++
.../hadoop/hbase/thrift2/generated/TScan.java | 241 ++++++++++++++++++-
.../apache/hadoop/hbase/thrift2/hbase.thrift | 8 +
.../thrift2/TestThriftHBaseServiceHandler.java | 45 ++++
5 files changed, 352 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/85fda441/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 85d95ea..69015ab 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
@@ -62,6 +63,7 @@ import org.apache.hadoop.hbase.thrift2.generated.THRegionLocation;
import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
import org.apache.hadoop.hbase.thrift2.generated.TMutation;
import org.apache.hadoop.hbase.thrift2.generated.TPut;
+import org.apache.hadoop.hbase.thrift2.generated.TReadType;
import org.apache.hadoop.hbase.thrift2.generated.TResult;
import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;
import org.apache.hadoop.hbase.thrift2.generated.TScan;
@@ -445,6 +447,14 @@ public class ThriftUtilities {
}
}
+ if (in.isSetReadType()) {
+ out.setReadType(readTypeFromThrift(in.getReadType()));
+ }
+
+ if (in.isSetLimit()) {
+ out.setLimit(in.getLimit());
+ }
+
return out;
}
@@ -560,4 +570,13 @@ public class ThriftUtilities {
default: return null;
}
}
+
+ private static ReadType readTypeFromThrift(TReadType tReadType) {
+ switch (tReadType.getValue()) {
+ case 1: return ReadType.DEFAULT;
+ case 2: return ReadType.STREAM;
+ case 3: return ReadType.PREAD;
+ default: return null;
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/85fda441/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java
new file mode 100644
index 0000000..4a6cf3e
--- /dev/null
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TReadType implements org.apache.thrift.TEnum {
+ DEFAULT(1),
+ STREAM(2),
+ PREAD(3);
+
+ private final int value;
+
+ private TReadType(int value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the integer value of this enum value, as defined in the Thrift IDL.
+ */
+ public int getValue() {
+ return value;
+ }
+
+ /**
+ * Find the enum type by its integer value, as defined in the Thrift IDL.
+ * @return null if the value is not found.
+ */
+ public static TReadType findByValue(int value) {
+ switch (value) {
+ case 1:
+ return DEFAULT;
+ case 2:
+ return STREAM;
+ case 3:
+ return PREAD;
+ default:
+ return null;
+ }
+ }
+}
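As an aside, readTypeFromThrift in ThriftUtilities (earlier in this commit) re-implements the 1/2/3 mapping that findByValue already encodes. A hedged alternative, not what the commit does, would be to switch on the generated enum constants directly (ReadType here is org.apache.hadoop.hbase.client.Scan.ReadType):

    // Hypothetical alternative to switching on the raw wire value.
    private static ReadType readTypeFromThrift(TReadType tReadType) {
      switch (tReadType) {
        case DEFAULT: return ReadType.DEFAULT;
        case STREAM:  return ReadType.STREAM;
        case PREAD:   return ReadType.PREAD;
        default:      return null; // unreachable while the two enums stay in sync
      }
    }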
http://git-wip-us.apache.org/repos/asf/hbase/blob/85fda441/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
index 7531052..1839207 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
* Any timestamps in the columns are ignored, but the colFamTimeRangeMap is included; use timeRange to select by timestamp.
* Max versions defaults to 1.
*/
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-03-06")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2017-03-27")
public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, java.io.Serializable, Cloneable, Comparable<TScan> {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan");
@@ -55,6 +55,8 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)11);
private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL, (short)12);
private static final org.apache.thrift.protocol.TField COL_FAM_TIME_RANGE_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("colFamTimeRangeMap", org.apache.thrift.protocol.TType.MAP, (short)13);
+ private static final org.apache.thrift.protocol.TField READ_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("readType", org.apache.thrift.protocol.TType.I32, (short)14);
+ private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("limit", org.apache.thrift.protocol.TType.I32, (short)15);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -75,6 +77,12 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
public boolean reversed; // optional
public boolean cacheBlocks; // optional
public Map<ByteBuffer,TTimeRange> colFamTimeRangeMap; // optional
+ /**
+ *
+ * @see TReadType
+ */
+ public TReadType readType; // optional
+ public int limit; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -90,7 +98,13 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
AUTHORIZATIONS((short)10, "authorizations"),
REVERSED((short)11, "reversed"),
CACHE_BLOCKS((short)12, "cacheBlocks"),
- COL_FAM_TIME_RANGE_MAP((short)13, "colFamTimeRangeMap");
+ COL_FAM_TIME_RANGE_MAP((short)13, "colFamTimeRangeMap"),
+ /**
+ *
+ * @see TReadType
+ */
+ READ_TYPE((short)14, "readType"),
+ LIMIT((short)15, "limit");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -131,6 +145,10 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
return CACHE_BLOCKS;
case 13: // COL_FAM_TIME_RANGE_MAP
return COL_FAM_TIME_RANGE_MAP;
+ case 14: // READ_TYPE
+ return READ_TYPE;
+ case 15: // LIMIT
+ return LIMIT;
default:
return null;
}
@@ -176,8 +194,9 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
private static final int __BATCHSIZE_ISSET_ID = 2;
private static final int __REVERSED_ISSET_ID = 3;
private static final int __CACHEBLOCKS_ISSET_ID = 4;
+ private static final int __LIMIT_ISSET_ID = 5;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.COLUMNS,_Fields.CACHING,_Fields.MAX_VERSIONS,_Fields.TIME_RANGE,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.REVERSED,_Fields.CACHE_BLOCKS,_Fields.COL_FAM_TIME_RANGE_MAP};
+ private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.COLUMNS,_Fields.CACHING,_Fields.MAX_VERSIONS,_Fields.TIME_RANGE,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.REVERSED,_Fields.CACHE_BLOCKS,_Fields.COL_FAM_TIME_RANGE_MAP,_Fields.READ_TYPE,_Fields.LIMIT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -208,10 +227,14 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
- tmpMap.put(_Fields.COL_FAM_TIME_RANGE_MAP, new org.apache.thrift.meta_data.FieldMetaData("colFamTimeRangeMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
- new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true),
+ tmpMap.put(_Fields.COL_FAM_TIME_RANGE_MAP, new org.apache.thrift.meta_data.FieldMetaData("colFamTimeRangeMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true),
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTimeRange.class))));
+ tmpMap.put(_Fields.READ_TYPE, new org.apache.thrift.meta_data.FieldMetaData("readType", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TReadType.class)));
+ tmpMap.put(_Fields.LIMIT, new org.apache.thrift.meta_data.FieldMetaData("limit", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap);
}
@@ -272,6 +295,10 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
}
this.colFamTimeRangeMap = __this__colFamTimeRangeMap;
}
+ if (other.isSetReadType()) {
+ this.readType = other.readType;
+ }
+ this.limit = other.limit;
}
public TScan deepCopy() {
@@ -298,6 +325,9 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
setCacheBlocksIsSet(false);
this.cacheBlocks = false;
this.colFamTimeRangeMap = null;
+ this.readType = null;
+ setLimitIsSet(false);
+ this.limit = 0;
}
public byte[] getStartRow() {
@@ -674,6 +704,61 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
}
}
+ /**
+ *
+ * @see TReadType
+ */
+ public TReadType getReadType() {
+ return this.readType;
+ }
+
+ /**
+ *
+ * @see TReadType
+ */
+ public TScan setReadType(TReadType readType) {
+ this.readType = readType;
+ return this;
+ }
+
+ public void unsetReadType() {
+ this.readType = null;
+ }
+
+ /** Returns true if field readType is set (has been assigned a value) and false otherwise */
+ public boolean isSetReadType() {
+ return this.readType != null;
+ }
+
+ public void setReadTypeIsSet(boolean value) {
+ if (!value) {
+ this.readType = null;
+ }
+ }
+
+ public int getLimit() {
+ return this.limit;
+ }
+
+ public TScan setLimit(int limit) {
+ this.limit = limit;
+ setLimitIsSet(true);
+ return this;
+ }
+
+ public void unsetLimit() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LIMIT_ISSET_ID);
+ }
+
+ /** Returns true if field limit is set (has been assigned a value) and false otherwise */
+ public boolean isSetLimit() {
+ return EncodingUtils.testBit(__isset_bitfield, __LIMIT_ISSET_ID);
+ }
+
+ public void setLimitIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LIMIT_ISSET_ID, value);
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case START_ROW:
@@ -780,6 +865,22 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
}
break;
+ case READ_TYPE:
+ if (value == null) {
+ unsetReadType();
+ } else {
+ setReadType((TReadType)value);
+ }
+ break;
+
+ case LIMIT:
+ if (value == null) {
+ unsetLimit();
+ } else {
+ setLimit((Integer)value);
+ }
+ break;
+
}
}
@@ -824,6 +925,12 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
case COL_FAM_TIME_RANGE_MAP:
return getColFamTimeRangeMap();
+ case READ_TYPE:
+ return getReadType();
+
+ case LIMIT:
+ return getLimit();
+
}
throw new IllegalStateException();
}
@@ -861,6 +968,10 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
return isSetCacheBlocks();
case COL_FAM_TIME_RANGE_MAP:
return isSetColFamTimeRangeMap();
+ case READ_TYPE:
+ return isSetReadType();
+ case LIMIT:
+ return isSetLimit();
}
throw new IllegalStateException();
}
@@ -995,6 +1106,24 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
return false;
}
+ boolean this_present_readType = true && this.isSetReadType();
+ boolean that_present_readType = true && that.isSetReadType();
+ if (this_present_readType || that_present_readType) {
+ if (!(this_present_readType && that_present_readType))
+ return false;
+ if (!this.readType.equals(that.readType))
+ return false;
+ }
+
+ boolean this_present_limit = true && this.isSetLimit();
+ boolean that_present_limit = true && that.isSetLimit();
+ if (this_present_limit || that_present_limit) {
+ if (!(this_present_limit && that_present_limit))
+ return false;
+ if (this.limit != that.limit)
+ return false;
+ }
+
return true;
}
@@ -1067,6 +1196,16 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
if (present_colFamTimeRangeMap)
list.add(colFamTimeRangeMap);
+ boolean present_readType = true && (isSetReadType());
+ list.add(present_readType);
+ if (present_readType)
+ list.add(readType.getValue());
+
+ boolean present_limit = true && (isSetLimit());
+ list.add(present_limit);
+ if (present_limit)
+ list.add(limit);
+
return list.hashCode();
}
@@ -1208,6 +1347,26 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetReadType()).compareTo(other.isSetReadType());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetReadType()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.readType, other.readType);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetLimit()).compareTo(other.isSetLimit());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetLimit()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.limit, other.limit);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1337,6 +1496,22 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
}
first = false;
}
+ if (isSetReadType()) {
+ if (!first) sb.append(", ");
+ sb.append("readType:");
+ if (this.readType == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.readType);
+ }
+ first = false;
+ }
+ if (isSetLimit()) {
+ if (!first) sb.append(", ");
+ sb.append("limit:");
+ sb.append(this.limit);
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1526,7 +1701,23 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
iprot.readMapEnd();
}
struct.setColFamTimeRangeMapIsSet(true);
- } else {
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 14: // READ_TYPE
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.readType = org.apache.hadoop.hbase.thrift2.generated.TReadType.findByValue(iprot.readI32());
+ struct.setReadTypeIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 15: // LIMIT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.limit = iprot.readI32();
+ struct.setLimitIsSet(true);
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1649,6 +1840,18 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
oprot.writeFieldEnd();
}
}
+ if (struct.readType != null) {
+ if (struct.isSetReadType()) {
+ oprot.writeFieldBegin(READ_TYPE_FIELD_DESC);
+ oprot.writeI32(struct.readType.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isSetLimit()) {
+ oprot.writeFieldBegin(LIMIT_FIELD_DESC);
+ oprot.writeI32(struct.limit);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1706,7 +1909,13 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
if (struct.isSetColFamTimeRangeMap()) {
optionals.set(12);
}
- oprot.writeBitSet(optionals, 13);
+ if (struct.isSetReadType()) {
+ optionals.set(13);
+ }
+ if (struct.isSetLimit()) {
+ optionals.set(14);
+ }
+ oprot.writeBitSet(optionals, 15);
if (struct.isSetStartRow()) {
oprot.writeBinary(struct.startRow);
}
@@ -1766,12 +1975,18 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
}
}
}
+ if (struct.isSetReadType()) {
+ oprot.writeI32(struct.readType.getValue());
+ }
+ if (struct.isSetLimit()) {
+ oprot.writeI32(struct.limit);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(13);
+ BitSet incoming = iprot.readBitSet(15);
if (incoming.get(0)) {
struct.startRow = iprot.readBinary();
struct.setStartRowIsSet(true);
@@ -1859,6 +2074,14 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
}
struct.setColFamTimeRangeMapIsSet(true);
}
+ if (incoming.get(13)) {
+ struct.readType = org.apache.hadoop.hbase.thrift2.generated.TReadType.findByValue(iprot.readI32());
+ struct.setReadTypeIsSet(true);
+ }
+ if (incoming.get(14)) {
+ struct.limit = iprot.readI32();
+ struct.setLimitIsSet(true);
+ }
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/85fda441/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
index 0bd8ece..e2e5b29 100644
--- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
+++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
@@ -208,6 +208,12 @@ struct TAppend {
5: optional TCellVisibility cellVisibility
}
+enum TReadType {
+ DEFAULT = 1,
+ STREAM = 2,
+ PREAD = 3
+}
+
/**
* Any timestamps in the columns are ignored, but the colFamTimeRangeMap is included; use timeRange to select by timestamp.
* Max versions defaults to 1.
@@ -226,6 +232,8 @@ struct TScan {
11: optional bool reversed
12: optional bool cacheBlocks
13: optional map<binary,TTimeRange> colFamTimeRangeMap
+ 14: optional TReadType readType
+ 15: optional i32 limit
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/85fda441/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
index 4b202f6..c3f59f6 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.thrift2.generated.TIOError;
import org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument;
import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
import org.apache.hadoop.hbase.thrift2.generated.TPut;
+import org.apache.hadoop.hbase.thrift2.generated.TReadType;
import org.apache.hadoop.hbase.thrift2.generated.TResult;
import org.apache.hadoop.hbase.thrift2.generated.TScan;
import org.apache.hadoop.hbase.thrift2.generated.TMutation;
@@ -851,6 +852,50 @@ public class TestThriftHBaseServiceHandler {
}
@Test
+ public void testSmallScan() throws Exception {
+ ThriftHBaseServiceHandler handler = createHandler();
+ ByteBuffer table = wrap(tableAname);
+
+ // insert data
+ TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
+ wrap(valueAname));
+ List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+ columnValues.add(columnValue);
+ for (int i = 0; i < 10; i++) {
+ TPut put = new TPut(wrap(("testSmallScan" + i).getBytes()), columnValues);
+ handler.put(table, put);
+ }
+
+ // small scan instance
+ TScan scan = new TScan();
+ scan.setStartRow("testSmallScan".getBytes());
+ scan.setStopRow("testSmallScan\uffff".getBytes());
+ scan.setReadType(TReadType.PREAD);
+ scan.setCaching(2);
+
+ // get scanner and rows
+ int scanId = handler.openScanner(table, scan);
+ List<TResult> results = handler.getScannerRows(scanId, 10);
+ assertEquals(10, results.size());
+ for (int i = 0; i < 10; i++) {
+ // check if the rows are returned and in order
+ assertArrayEquals(("testSmallScan" + i).getBytes(), results.get(i).getRow());
+ }
+
+ // check that we are at the end of the scan
+ results = handler.getScannerRows(scanId, 10);
+ assertEquals(0, results.size());
+
+ // close scanner and check that it was indeed closed
+ handler.closeScanner(scanId);
+ try {
+ handler.getScannerRows(scanId, 10);
+ fail("Scanner id should be invalid");
+ } catch (TIllegalArgument e) {
+ // expected: the scanner id is no longer valid after closeScanner
+ }
+ }
+
+ @Test
public void testPutTTL() throws Exception {
ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testPutTTL".getBytes();
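Putting the new thrift2 fields together, a client-side sketch modeled on testSmallScan above; the handler and table arguments are assumed to be set up the same way as in that test:

    import java.nio.ByteBuffer;
    import java.util.List;
    import org.apache.hadoop.hbase.thrift2.ThriftHBaseServiceHandler;
    import org.apache.hadoop.hbase.thrift2.generated.TReadType;
    import org.apache.hadoop.hbase.thrift2.generated.TResult;
    import org.apache.hadoop.hbase.thrift2.generated.TScan;

    public class SmallScanSketch {
      // 'handler' and 'table' are assumed to be prepared as in testSmallScan.
      static List<TResult> smallScan(ThriftHBaseServiceHandler handler, ByteBuffer table)
          throws Exception {
        TScan scan = new TScan();
        scan.setStartRow("rowPrefix".getBytes());
        scan.setStopRow("rowPrefix\uffff".getBytes());
        scan.setReadType(TReadType.PREAD); // pread instead of a streaming read
        scan.setLimit(5);                  // stop after 5 rows (new field 15)
        scan.setCaching(2);

        int scannerId = handler.openScanner(table, scan);
        try {
          // With setLimit(5), at most 5 rows come back even though we ask for 10.
          return handler.getScannerRows(scannerId, 10);
        } finally {
          handler.closeScanner(scannerId);
        }
      }
    }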
[04/23] hbase git commit: Fix hanging tag on site resources page
Posted by sy...@apache.org.
Fix hanging tag on site resources page
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a41b1852
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a41b1852
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a41b1852
Branch: refs/heads/hbase-12439
Commit: a41b1852da5d445f711237afaf5a58f26998ed6b
Parents: 16900c8
Author: Michael Stack <st...@apache.org>
Authored: Mon Mar 20 14:54:56 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Mon Mar 20 14:54:56 2017 -0700
----------------------------------------------------------------------
src/main/site/xdoc/resources.xml | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/a41b1852/src/main/site/xdoc/resources.xml
----------------------------------------------------------------------
diff --git a/src/main/site/xdoc/resources.xml b/src/main/site/xdoc/resources.xml
index 078587c..19548b6 100644
--- a/src/main/site/xdoc/resources.xml
+++ b/src/main/site/xdoc/resources.xml
@@ -36,7 +36,6 @@ under the License.
<section name="HBase Administration Cookbook">
<p><a href="http://www.packtpub.com/hbase-administration-for-optimum-database-performance-cookbook/book">HBase Administration Cookbook</a> by Yifeng Jiang. Publisher: PACKT Publishing, Release: Expected August 2012, Pages: 335.</p>
</section>
-</section>
<section name="HBase High Performance Cookbook">
<p><a href="https://www.packtpub.com/big-data-and-business-intelligence/hbase-high-performance-cookbook">HBase High Performance Cookbook</a> by Ruchir Choudhry. Publisher: PACKT Publishing, Release: January 2017, Pages: 350.</p>
</section>
[19/23] hbase git commit: HBASE-17834 close stale PRs.
Posted by sy...@apache.org.
HBASE-17834 close stale PRs.
* closes #8 - stale, please reopen if interested in continuing.
* closes #40 - HBASE-15314 has already been merged.
Signed-off-by: Michael Stack <st...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4a076cdf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4a076cdf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4a076cdf
Branch: refs/heads/hbase-12439
Commit: 4a076cdf069aae75504a5399d4aabbf9f68fcce6
Parents: 50e9825
Author: Sean Busbey <bu...@apache.org>
Authored: Sat Mar 25 01:23:52 2017 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Sat Mar 25 15:06:33 2017 -0500
----------------------------------------------------------------------
----------------------------------------------------------------------
[15/23] hbase git commit: HBASE-17595 addendum fix the problem for
mayHaveMoreCellsInRow
Posted by sy...@apache.org.
HBASE-17595 addendum fix the problem for mayHaveMoreCellsInRow
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1c1f258
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1c1f258
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1c1f258
Branch: refs/heads/hbase-12439
Commit: f1c1f258e5b2dee152a46bd7f6887e928e6a6b3e
Parents: fe3c32e
Author: zhangduo <zh...@apache.org>
Authored: Thu Mar 23 15:47:26 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Thu Mar 23 20:34:10 2017 +0800
----------------------------------------------------------------------
.../client/AllowPartialScanResultCache.java | 34 +++-
.../AsyncScanSingleRegionRpcRetryingCaller.java | 40 ++---
.../hbase/client/BatchScanResultCache.java | 41 ++++-
.../hadoop/hbase/client/ClientScanner.java | 17 +-
.../hbase/client/CompleteScanResultCache.java | 24 ++-
.../hadoop/hbase/client/ConnectionUtils.java | 17 --
.../org/apache/hadoop/hbase/client/Scan.java | 2 -
.../hadoop/hbase/client/ScanResultCache.java | 7 +-
.../hadoop/hbase/regionserver/HRegion.java | 2 +-
.../hbase/regionserver/RSRpcServices.java | 114 ++++++++----
.../hbase/regionserver/ScannerContext.java | 2 +-
.../client/AbstractTestAsyncTableScan.java | 11 +-
.../hbase/client/ColumnCountOnRowFilter.java | 58 ++++++
.../hbase/client/TestLimitedScanWithFilter.java | 177 +++++++++++++++++++
.../TestRawAsyncTableLimitedScanWithFilter.java | 174 ++++++++++++++++++
15 files changed, 618 insertions(+), 102 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java
index 82f1ea0..5b6c411 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java
@@ -23,6 +23,7 @@ import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
@@ -38,13 +39,23 @@ class AllowPartialScanResultCache implements ScanResultCache {
// beginning of a row when retry.
private Cell lastCell;
- private void updateLastCell(Result result) {
+ private boolean lastResultPartial;
+
+ private int numberOfCompleteRows;
+
+ private void recordLastResult(Result result) {
lastCell = result.rawCells()[result.rawCells().length - 1];
+ lastResultPartial = result.mayHaveMoreCellsInRow();
}
@Override
public Result[] addAndGet(Result[] results, boolean isHeartbeatMessage) throws IOException {
if (results.length == 0) {
+ if (!isHeartbeatMessage && lastResultPartial) {
+ // An empty non-heartbeat result indicates that there must be a row change. So if
+ // lastResultPartial is true then we need to increase numberOfCompleteRows.
+ numberOfCompleteRows++;
+ }
return EMPTY_RESULT_ARRAY;
}
int i;
@@ -58,16 +69,29 @@ class AllowPartialScanResultCache implements ScanResultCache {
if (i == results.length) {
return EMPTY_RESULT_ARRAY;
}
- updateLastCell(results[results.length - 1]);
+ if (lastResultPartial && !CellUtil.matchingRow(lastCell, results[0].getRow())) {
+ // there is a row change, so increase numberOfCompleteRows
+ numberOfCompleteRows++;
+ }
+ recordLastResult(results[results.length - 1]);
if (i > 0) {
- return Arrays.copyOfRange(results, i, results.length);
- } else {
- return results;
+ results = Arrays.copyOfRange(results, i, results.length);
}
+ for (Result result : results) {
+ if (!result.mayHaveMoreCellsInRow()) {
+ numberOfCompleteRows++;
+ }
+ }
+ return results;
}
@Override
public void clear() {
// we do not cache anything
}
+
+ @Override
+ public int numberOfCompleteRows() {
+ return numberOfCompleteRows;
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 7ed6f03..6343c8b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -17,13 +17,16 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.*;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCCallsMetrics;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.incRPCRetriesMetrics;
import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan;
import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForScan;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.numberOfIndividualRows;
import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.updateResultsMetrics;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.updateServerSideMetrics;
import com.google.common.base.Preconditions;
@@ -32,7 +35,6 @@ import io.netty.util.Timeout;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
@@ -209,7 +211,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
private ScanResponse resp;
- private int numberOfIndividualRows;
+ private int numberOfCompleteRows;
// If the scan is suspended successfully, we need to do lease renewal to prevent it being closed
// by RS due to lease expire. It is a one-time timer task so we need to schedule a new task
@@ -226,7 +228,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
// resume is called after suspend, then it is also safe to just reference resp and
// numValidResults after the synchronized block as no one will change it anymore.
ScanResponse localResp;
- int localNumberOfIndividualRows;
+ int localNumberOfCompleteRows;
synchronized (this) {
if (state == ScanResumerState.INITIALIZED) {
// user calls this method before we call prepare, so just set the state to
@@ -243,9 +245,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
leaseRenewer.cancel();
}
localResp = this.resp;
- localNumberOfIndividualRows = this.numberOfIndividualRows;
+ localNumberOfCompleteRows = this.numberOfCompleteRows;
}
- completeOrNext(localResp, localNumberOfIndividualRows);
+ completeOrNext(localResp, localNumberOfCompleteRows);
}
private void scheduleRenewLeaseTask() {
@@ -265,14 +267,14 @@ class AsyncScanSingleRegionRpcRetryingCaller {
// return false if the scan has already been resumed. See the comment above for ScanResumerImpl
// for more details.
- synchronized boolean prepare(ScanResponse resp, int numberOfIndividualRows) {
+ synchronized boolean prepare(ScanResponse resp, int numberOfCompleteRows) {
if (state == ScanResumerState.RESUMED) {
// user calls resume before we actually suspend the scan, just continue;
return false;
}
state = ScanResumerState.SUSPENDED;
this.resp = resp;
- this.numberOfIndividualRows = numberOfIndividualRows;
+ this.numberOfCompleteRows = numberOfCompleteRows;
// if there are no more results in region then the scanner at RS side will be closed
// automatically so we do not need to renew lease.
if (resp.getMoreResultsInRegion()) {
@@ -432,7 +434,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
}
}
- private void completeOrNext(ScanResponse resp, int numIndividualRows) {
+ private void completeOrNext(ScanResponse resp, int numberOfCompleteRows) {
if (resp.hasMoreResults() && !resp.getMoreResults()) {
// RS tells us there is no more data for the whole scan
completeNoMoreResults();
@@ -441,7 +443,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
if (scan.getLimit() > 0) {
// The RS should have set the moreResults field in ScanResponse to false when we have reached
// the limit, so we add an assert here.
- int newLimit = scan.getLimit() - numIndividualRows;
+ int newLimit = scan.getLimit() - numberOfCompleteRows;
assert newLimit > 0;
scan.setLimit(newLimit);
}
@@ -461,6 +463,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
updateServerSideMetrics(scanMetrics, resp);
boolean isHeartbeatMessage = resp.hasHeartbeatMessage() && resp.getHeartbeatMessage();
Result[] results;
+ int numberOfCompleteRowsBefore = resultCache.numberOfCompleteRows();
try {
Result[] rawResults = ResponseConverter.getResults(controller.cellScanner(), resp);
updateResultsMetrics(scanMetrics, rawResults, isHeartbeatMessage);
@@ -476,16 +479,12 @@ class AsyncScanSingleRegionRpcRetryingCaller {
return;
}
- // calculate this before calling onNext as it is free for user to modify the result array in
- // onNext.
- int numberOfIndividualRows = numberOfIndividualRows(Arrays.asList(results));
ScanControllerImpl scanController = new ScanControllerImpl();
- if (results.length == 0) {
- // if we have nothing to return then just call onHeartbeat.
- consumer.onHeartbeat(scanController);
- } else {
+ if (results.length > 0) {
updateNextStartRowWhenError(results[results.length - 1]);
consumer.onNext(results, scanController);
+ } else if (resp.hasHeartbeatMessage() && resp.getHeartbeatMessage()) {
+ consumer.onHeartbeat(scanController);
}
ScanControllerState state = scanController.destroy();
if (state == ScanControllerState.TERMINATED) {
@@ -497,12 +496,13 @@ class AsyncScanSingleRegionRpcRetryingCaller {
completeNoMoreResults();
return;
}
+ int numberOfCompleteRows = resultCache.numberOfCompleteRows() - numberOfCompleteRowsBefore;
if (state == ScanControllerState.SUSPENDED) {
- if (scanController.resumer.prepare(resp, numberOfIndividualRows)) {
+ if (scanController.resumer.prepare(resp, numberOfCompleteRows)) {
return;
}
}
- completeOrNext(resp, numberOfIndividualRows);
+ completeOrNext(resp, numberOfCompleteRows);
}
private void call() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java
index 9ab959b..293f411 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java
@@ -26,6 +26,7 @@ import java.util.Deque;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes;
@@ -45,19 +46,25 @@ public class BatchScanResultCache implements ScanResultCache {
// beginning of a row when retry.
private Cell lastCell;
+ private boolean lastResultPartial;
+
private final Deque<Result> partialResults = new ArrayDeque<>();
private int numCellsOfPartialResults;
+ private int numberOfCompleteRows;
+
public BatchScanResultCache(int batch) {
this.batch = batch;
}
- private void updateLastCell(Result result) {
+ private void recordLastResult(Result result) {
lastCell = result.rawCells()[result.rawCells().length - 1];
+ lastResultPartial = result.mayHaveMoreCellsInRow();
}
private Result createCompletedResult() throws IOException {
+ numberOfCompleteRows++;
Result result = Result.createCompleteResult(partialResults);
partialResults.clear();
numCellsOfPartialResults = 0;
@@ -104,8 +111,15 @@ public class BatchScanResultCache implements ScanResultCache {
@Override
public Result[] addAndGet(Result[] results, boolean isHeartbeatMessage) throws IOException {
if (results.length == 0) {
- if (!partialResults.isEmpty() && !isHeartbeatMessage) {
- return new Result[] { createCompletedResult() };
+ if (!isHeartbeatMessage) {
+ if (!partialResults.isEmpty()) {
+ return new Result[] { createCompletedResult() };
+ }
+ if (lastResultPartial) {
+ // An empty non-heartbeat result indicates that there must be a row change. So if
+ // lastResultPartial is true then we need to increase numberOfCompleteRows.
+ numberOfCompleteRows++;
+ }
}
return EMPTY_RESULT_ARRAY;
}
@@ -115,6 +129,17 @@ public class BatchScanResultCache implements ScanResultCache {
if (result == null) {
continue;
}
+ if (!partialResults.isEmpty()) {
+ if (!Bytes.equals(partialResults.peek().getRow(), result.getRow())) {
+ // there is a row change
+ regroupedResults.add(createCompletedResult());
+ }
+ } else if (lastResultPartial && !CellUtil.matchingRow(lastCell, result.getRow())) {
+ // For a batched scan we may return partial results to the user when we reach the batch
+ // limit, so here we need to use lastCell to determine if there is a row change and increase
+ // numberOfCompleteRows.
+ numberOfCompleteRows++;
+ }
// check if we have a row change
if (!partialResults.isEmpty() &&
!Bytes.equals(partialResults.peek().getRow(), result.getRow())) {
@@ -122,9 +147,12 @@ public class BatchScanResultCache implements ScanResultCache {
}
Result regroupedResult = regroupResults(result);
if (regroupedResult != null) {
+ if (!regroupedResult.mayHaveMoreCellsInRow()) {
+ numberOfCompleteRows++;
+ }
regroupedResults.add(regroupedResult);
// only update last cell when we actually return it to user.
- updateLastCell(regroupedResult);
+ recordLastResult(regroupedResult);
}
if (!result.mayHaveMoreCellsInRow() && !partialResults.isEmpty()) {
// We are done for this row
@@ -139,4 +167,9 @@ public class BatchScanResultCache implements ScanResultCache {
partialResults.clear();
numCellsOfPartialResults = 0;
}
+
+ @Override
+ public int numberOfCompleteRows() {
+ return numberOfCompleteRows;
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 8aa5c53..fa5f868 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -20,13 +20,11 @@ package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize;
import static org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache;
import static org.apache.hadoop.hbase.client.ConnectionUtils.incRegionCountMetrics;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.numberOfIndividualRows;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.io.InterruptedIOException;
-import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
@@ -459,8 +457,11 @@ public abstract class ClientScanner extends AbstractClientScanner {
// Groom the array of Results that we received back from the server before adding that
// Results to the scanner's cache. If partial results are not allowed to be seen by the
// caller, all book keeping will be performed within this method.
+ int numberOfCompleteRowsBefore = scanResultCache.numberOfCompleteRows();
Result[] resultsToAddToCache =
scanResultCache.addAndGet(values, callable.isHeartbeatMessage());
+ int numberOfCompleteRows =
+ scanResultCache.numberOfCompleteRows() - numberOfCompleteRowsBefore;
if (resultsToAddToCache.length > 0) {
for (Result rs : resultsToAddToCache) {
cache.add(rs);
@@ -470,12 +471,12 @@ public abstract class ClientScanner extends AbstractClientScanner {
addEstimatedSize(estimatedHeapSizeOfResult);
this.lastResult = rs;
}
- if (scan.getLimit() > 0) {
- int newLimit =
- scan.getLimit() - numberOfIndividualRows(Arrays.asList(resultsToAddToCache));
- assert newLimit >= 0;
- scan.setLimit(newLimit);
- }
+ }
+
+ if (scan.getLimit() > 0) {
+ int newLimit = scan.getLimit() - numberOfCompleteRows;
+ assert newLimit >= 0;
+ scan.setLimit(newLimit);
}
if (scanExhausted(values)) {
closeScanner();
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java
index e09ddfb..a132642 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Private
class CompleteScanResultCache implements ScanResultCache {
+ private int numberOfCompleteRows;
+
private final List<Result> partialResults = new ArrayList<>();
private Result combine() throws IOException {
@@ -59,6 +61,11 @@ class CompleteScanResultCache implements ScanResultCache {
return prependResults;
}
+ private Result[] updateNumberOfCompleteResultsAndReturn(Result... results) {
+ numberOfCompleteRows += results.length;
+ return results;
+ }
+
@Override
public Result[] addAndGet(Result[] results, boolean isHeartbeatMessage) throws IOException {
// If no results were returned it indicates that either we have the all the partial results
@@ -69,7 +76,7 @@ class CompleteScanResultCache implements ScanResultCache {
// and thus there may be more partials server side that still need to be added to the partial
// list before we form the complete Result
if (!partialResults.isEmpty() && !isHeartbeatMessage) {
- return new Result[] { combine() };
+ return updateNumberOfCompleteResultsAndReturn(combine());
}
return EMPTY_RESULT_ARRAY;
}
@@ -79,7 +86,7 @@ class CompleteScanResultCache implements ScanResultCache {
if (last.mayHaveMoreCellsInRow()) {
if (partialResults.isEmpty()) {
partialResults.add(last);
- return Arrays.copyOf(results, results.length - 1);
+ return updateNumberOfCompleteResultsAndReturn(Arrays.copyOf(results, results.length - 1));
}
// We have only one result and it is partial
if (results.length == 1) {
@@ -90,21 +97,26 @@ class CompleteScanResultCache implements ScanResultCache {
}
Result completeResult = combine();
partialResults.add(last);
- return new Result[] { completeResult };
+ return updateNumberOfCompleteResultsAndReturn(completeResult);
}
// We have some complete results
Result[] resultsToReturn = prependCombined(results, results.length - 1);
partialResults.add(last);
- return resultsToReturn;
+ return updateNumberOfCompleteResultsAndReturn(resultsToReturn);
}
if (!partialResults.isEmpty()) {
- return prependCombined(results, results.length);
+ return updateNumberOfCompleteResultsAndReturn(prependCombined(results, results.length));
}
- return results;
+ return updateNumberOfCompleteResultsAndReturn(results);
}
@Override
public void clear() {
partialResults.clear();
}
+
+ @Override
+ public int numberOfCompleteRows() {
+ return numberOfCompleteRows;
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index f54f552..98ac845 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -402,23 +402,6 @@ public final class ConnectionUtils {
.thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
}
- /**
- * Count the individual rows for the given result list.
- * <p>
- * There are two reason why we need to use this method instead of a simple {@code results.length}.
- * <ol>
- * <li>Server may return only part of the whole cells of a row for the last result, and if
- * allowPartial is true, we will return the array to user directly. We should not count the last
- * result.</li>
- * <li>If this is a batched scan, a row may be split into several results, but they should be
- * counted as one row. For example, a row with 15 cells will be split into 3 results with 5 cells
- * each if {@code scan.getBatch()} is 5.</li>
- * </ol>
- */
- public static int numberOfIndividualRows(List<Result> results) {
- return (int) results.stream().filter(r -> !r.mayHaveMoreCellsInRow()).count();
- }
-
public static ScanResultCache createScanResultCache(Scan scan) {
if (scan.getAllowPartialResults()) {
return new AllowPartialScanResultCache();
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 03c692c..0047d2f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -1113,8 +1113,6 @@ public class Scan extends Query {
* reaches this value.
* <p>
* This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
- * <p>
- * Can not be used together with batch and allowPartial.
* @param limit the limit of rows for this scan
* @return this
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java
index 2366b57..2d28e1a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultCache.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
* <ol>
* <li>Get results from ScanResponse proto.</li>
* <li>Pass them to ScanResultCache and get something back.</li>
- * <li>If we actually get something back, then pass it to ScanObserver.</li>
+ * <li>If we actually get something back, then pass it to ScanConsumer.</li>
* </ol>
*/
@InterfaceAudience.Private
@@ -50,4 +50,9 @@ interface ScanResultCache {
* again.
*/
void clear();
+
+ /**
+ * Return the number of complete rows. Used to implement limited scan.
+ */
+ int numberOfCompleteRows();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a5176ed..8deb9f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5977,7 +5977,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// If the size limit was reached it means a partial Result is being returned. Returning a
// partial Result means that we should not reset the filters; filters should only be reset in
// between rows
- if (!scannerContext.hasMoreCellsInRow()) {
+ if (!scannerContext.mayHaveMoreCellsInRow()) {
resetFilters();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 7312852..298f538 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -18,6 +18,8 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import com.google.common.annotations.VisibleForTesting;
+
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
@@ -204,7 +206,6 @@ import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.zookeeper.KeeperException;
-import com.google.common.annotations.VisibleForTesting;
/**
* Implements the regionserver RPC services.
@@ -352,6 +353,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
private final Region r;
private final RpcCallback closeCallBack;
private final RpcCallback shippedCallback;
+ private byte[] rowOfLastPartialResult;
public RegionScannerHolder(String scannerName, RegionScanner s, Region r,
RpcCallback closeCallBack, RpcCallback shippedCallback) {
@@ -2770,10 +2772,22 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
return -1L;
}
+ private void checkLimitOfRows(int numOfCompleteRows, int limitOfRows, boolean moreRows,
+ ScannerContext scannerContext, ScanResponse.Builder builder) {
+ if (numOfCompleteRows >= limitOfRows) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Done scanning, limit of rows reached, moreRows: " + moreRows +
+ " scannerContext: " + scannerContext);
+ }
+ builder.setMoreResults(false);
+ }
+ }
+
// return whether we have more results in region.
- private boolean scan(HBaseRpcController controller, ScanRequest request, RegionScannerHolder rsh,
- long maxQuotaResultSize, int maxResults, List<Result> results, ScanResponse.Builder builder,
- MutableObject lastBlock, RpcCallContext context) throws IOException {
+ private void scan(HBaseRpcController controller, ScanRequest request, RegionScannerHolder rsh,
+ long maxQuotaResultSize, int maxResults, int limitOfRows, List<Result> results,
+ ScanResponse.Builder builder, MutableObject lastBlock, RpcCallContext context)
+ throws IOException {
Region region = rsh.r;
RegionScanner scanner = rsh.s;
long maxResultSize;
@@ -2788,7 +2802,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
List<Cell> values = new ArrayList<>(32);
region.startRegionOperation(Operation.SCAN);
try {
- int i = 0;
+ int numOfResults = 0;
+ int numOfCompleteRows = 0;
long before = EnvironmentEdgeManager.currentTime();
synchronized (scanner) {
boolean stale = (region.getRegionInfo().getReplicaId() != 0);
@@ -2835,7 +2850,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
contextBuilder.setTrackMetrics(trackMetrics);
ScannerContext scannerContext = contextBuilder.build();
boolean limitReached = false;
- while (i < maxResults) {
+ while (numOfResults < maxResults) {
// Reset the batch progress to 0 before every call to RegionScanner#nextRaw. The
// batch limit is a limit on the number of cells per Result. Thus, if progress is
// being tracked (i.e. scannerContext.keepProgress() is true) then we need to
@@ -2847,16 +2862,46 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
moreRows = scanner.nextRaw(values, scannerContext);
if (!values.isEmpty()) {
- Result r = Result.create(values, null, stale, scannerContext.hasMoreCellsInRow());
+ if (limitOfRows > 0) {
+ // First we need to check if the last result is partial and we have a row change. If
+ // so then we need to increase the numOfCompleteRows.
+ if (results.isEmpty()) {
+ if (rsh.rowOfLastPartialResult != null &&
+ !CellUtil.matchingRow(values.get(0), rsh.rowOfLastPartialResult)) {
+ numOfCompleteRows++;
+ checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext,
+ builder);
+ }
+ } else {
+ Result lastResult = results.get(results.size() - 1);
+ if (lastResult.mayHaveMoreCellsInRow() &&
+ !CellUtil.matchingRow(values.get(0), lastResult.getRow())) {
+ numOfCompleteRows++;
+ checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext,
+ builder);
+ }
+ }
+ if (builder.hasMoreResults() && !builder.getMoreResults()) {
+ break;
+ }
+ }
+ boolean mayHaveMoreCellsInRow = scannerContext.mayHaveMoreCellsInRow();
+ Result r = Result.create(values, null, stale, mayHaveMoreCellsInRow);
lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
results.add(r);
- i++;
+ numOfResults++;
+ if (!mayHaveMoreCellsInRow && limitOfRows > 0) {
+ numOfCompleteRows++;
+ checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext, builder);
+ if (builder.hasMoreResults() && !builder.getMoreResults()) {
+ break;
+ }
+ }
}
-
boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS);
boolean timeLimitReached = scannerContext.checkTimeLimit(LimitScope.BETWEEN_ROWS);
- boolean rowLimitReached = i >= maxResults;
- limitReached = sizeLimitReached || timeLimitReached || rowLimitReached;
+ boolean resultsLimitReached = numOfResults >= maxResults;
+ limitReached = sizeLimitReached || timeLimitReached || resultsLimitReached;
if (limitReached || !moreRows) {
if (LOG.isTraceEnabled()) {
@@ -2882,7 +2927,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
// We didn't get a single batch
builder.setMoreResultsInRegion(false);
}
-
// Check to see if the client requested that we track metrics server side. If the
// client requested metrics, retrieve the metrics from the scanner context.
if (trackMetrics) {
@@ -2899,7 +2943,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
builder.setScanMetrics(metricBuilder.build());
}
}
- region.updateReadRequestsCount(i);
+ region.updateReadRequestsCount(numOfResults);
long end = EnvironmentEdgeManager.currentTime();
long responseCellSize = context != null ? context.getResponseCellSize() : 0;
region.getMetrics().updateScanTime(end - before);
@@ -2914,7 +2958,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
if (region.getCoprocessorHost() != null) {
region.getCoprocessorHost().postScannerNext(scanner, results, maxResults, true);
}
- return builder.getMoreResultsInRegion();
}
/**
@@ -3022,14 +3065,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
// now let's do the real scan.
long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
RegionScanner scanner = rsh.s;
- boolean moreResults = true;
- boolean moreResultsInRegion = true;
// this is the limit of rows for this scan; if the number of rows reaches this value, we will
// close the scanner.
int limitOfRows;
if (request.hasLimitOfRows()) {
limitOfRows = request.getLimitOfRows();
- rows = Math.min(rows, limitOfRows);
} else {
limitOfRows = -1;
}
@@ -3052,33 +3092,45 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
}
if (!done) {
- moreResultsInRegion = scan((HBaseRpcController) controller, request, rsh,
- maxQuotaResultSize, rows, results, builder, lastBlock, context);
+ scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows,
+ results, builder, lastBlock, context);
}
}
quota.addScanResult(results);
-
+ addResults(builder, results, (HBaseRpcController) controller,
+ RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()),
+ isClientCellBlockSupport(context));
if (scanner.isFilterDone() && results.isEmpty()) {
// If the scanner's filter - if any - is done with the scan
// only set moreResults to false if the results is empty. This is used to keep compatible
// with the old scan implementation where we just ignore the returned results if moreResults
// is false. Can remove the isEmpty check after we get rid of the old implementation.
- moreResults = false;
- } else if (limitOfRows > 0 && !results.isEmpty() &&
- !results.get(results.size() - 1).mayHaveMoreCellsInRow() &&
- ConnectionUtils.numberOfIndividualRows(results) >= limitOfRows) {
- // if we have reached the limit of rows
- moreResults = false;
+ builder.setMoreResults(false);
+ }
+ // we only set moreResults to false in the above code, so set it to true if we haven't set it
+ // yet.
+ if (!builder.hasMoreResults()) {
+ builder.setMoreResults(true);
+ }
+ if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
+ // Record the row of the last result if it is a partial result
+ // We need this to count the complete rows we have returned to the client, as
+ // mayHaveMoreCellsInRow being true does not mean that there will be extra cells for the
+ // current row. We may filter out all the remaining cells for the current row and just
+ // return the cells of the next row when calling RegionScanner.nextRaw. So here we need to
+ // check for a row change.
+ Result lastResult = results.get(results.size() - 1);
+ if (lastResult.mayHaveMoreCellsInRow()) {
+ rsh.rowOfLastPartialResult = lastResult.getRow();
+ } else {
+ rsh.rowOfLastPartialResult = null;
+ }
}
- addResults(builder, results, (HBaseRpcController) controller,
- RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()),
- isClientCellBlockSupport(context));
- if (!moreResults || !moreResultsInRegion || closeScanner) {
+ if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
scannerClosed = true;
closeScanner(region, scanner, scannerName, context);
}
- builder.setMoreResults(moreResults);
return builder.build();
} catch (Exception e) {
try {
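To make the row accounting above concrete, here is a minimal sketch of how complete rows are counted when a Result may be marked partial; the standalone helper and its name are hypothetical, not part of this patch:

import java.util.List;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

final class CompleteRowCounter {
  static int countCompleteRows(List<Result> results) {
    int complete = 0;
    byte[] pendingRow = null; // row of a trailing partial Result, if any
    for (Result r : results) {
      // A row change while the previous Result was still marked partial means
      // the filter consumed the rest of that row, so it was in fact complete.
      if (pendingRow != null && !Bytes.equals(pendingRow, r.getRow())) {
        complete++;
        pendingRow = null;
      }
      if (r.mayHaveMoreCellsInRow()) {
        pendingRow = r.getRow(); // may still be completed by a later Result
      } else {
        complete++;
        pendingRow = null;
      }
    }
    return complete;
  }
}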
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 15e2ec0..19c106b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -228,7 +228,7 @@ public class ScannerContext {
* @return true when we may have more cells for the current row. This is usually because we
* have reached a limit in the middle of a row
*/
- boolean hasMoreCellsInRow() {
+ boolean mayHaveMoreCellsInRow() {
return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW ||
scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW ||
scannerState == NextState.BATCH_LIMIT_REACHED;
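For reference, the client-side shape of a limited scan with a filter, as the new tests below exercise, looks roughly like this (a sketch assuming an open Connection named conn and the test table and filter defined below):

Scan scan = new Scan()
    .setFilter(new ColumnCountOnRowFilter(2)) // keep at most two cells per row
    .setMaxResultSize(1)                      // force tiny per-RPC batches
    .setLimit(5);                             // server closes the scanner after 5 rows
try (Table table = conn.getTable(TableName.valueOf("TestRegionScanner"));
    ResultScanner scanner = table.getScanner(scan)) {
  for (Result r; (r = scanner.next()) != null; ) {
    // without setAllowPartialResults(true), each Result here is a complete row
    System.out.println(Bytes.toInt(r.getRow()) + " -> " + r.size() + " cells");
  }
}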
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
index 73e8f48..661ffe2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
@@ -237,12 +237,11 @@ public abstract class AbstractTestAsyncTableScan {
@Test
public void testScanWithLimit() throws Exception {
- testScan(1, true, 998, false, 900); // from first region to last region
- testScan(123, true, 345, true, 100);
- testScan(234, true, 456, false, 100);
- testScan(345, false, 567, true, 100);
- testScan(456, false, 678, false, 100);
-
+ // testScan(1, true, 998, false, 900); // from first region to last region
+ testScan(123, true, 234, true, 100);
+ // testScan(234, true, 456, false, 100);
+ // testScan(345, false, 567, true, 100);
+ // testScan(456, false, 678, false, 100);
}
@Test
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java
new file mode 100644
index 0000000..c4b4d28
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.FilterBase;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public final class ColumnCountOnRowFilter extends FilterBase {
+
+ private final int limit;
+
+ private int count = 0;
+
+ public ColumnCountOnRowFilter(int limit) {
+ this.limit = limit;
+ }
+
+ @Override
+ public ReturnCode filterKeyValue(Cell v) throws IOException {
+ count++;
+ return count > limit ? ReturnCode.NEXT_ROW : ReturnCode.INCLUDE;
+ }
+
+ @Override
+ public void reset() throws IOException {
+ this.count = 0;
+ }
+
+ @Override
+ public byte[] toByteArray() throws IOException {
+ return Bytes.toBytes(limit);
+ }
+
+ public static ColumnCountOnRowFilter parseFrom(byte[] bytes) throws DeserializationException {
+ return new ColumnCountOnRowFilter(Bytes.toInt(bytes));
+ }
+}
\ No newline at end of file
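Because filters run on the region server, the toByteArray/parseFrom pair above is what lets this test filter travel over the wire; a quick round-trip sketch (checked exceptions omitted):

ColumnCountOnRowFilter f = new ColumnCountOnRowFilter(2);
byte[] wire = f.toByteArray();                                        // serialized limit
ColumnCountOnRowFilter copy = ColumnCountOnRowFilter.parseFrom(wire); // rebuilt server-side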
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java
new file mode 100644
index 0000000..f702e3d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * With a filter we may stop in the middle of a row and think that we still have more cells
+ * for the current row, when in fact all the remaining cells will be filtered out. This leads
+ * to a Result whose mayHaveMoreCellsInRow is true although no more cells for that row will
+ * be returned. Here we test that our limited scan still works in this case.
+ */
+@Category({ MediumTests.class, ClientTests.class })
+public class TestLimitedScanWithFilter {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static final TableName TABLE_NAME = TableName.valueOf("TestRegionScanner");
+
+ private static final byte[] FAMILY = Bytes.toBytes("cf");
+
+ private static final byte[][] CQS =
+ { Bytes.toBytes("cq1"), Bytes.toBytes("cq2"), Bytes.toBytes("cq3"), Bytes.toBytes("cq4") };
+
+ private static int ROW_COUNT = 10;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ UTIL.startMiniCluster(1);
+ try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) {
+ for (int i = 0; i < ROW_COUNT; i++) {
+ Put put = new Put(Bytes.toBytes(i));
+ for (int j = 0; j < CQS.length; j++) {
+ put.addColumn(FAMILY, CQS[j], Bytes.toBytes((j + 1) * i));
+ }
+ table.put(put);
+ }
+ }
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testCompleteResult() throws IOException {
+ int limit = 5;
+ Scan scan =
+ new Scan().setFilter(new ColumnCountOnRowFilter(2)).setMaxResultSize(1).setLimit(limit);
+ try (Table table = UTIL.getConnection().getTable(TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ for (int i = 0; i < limit; i++) {
+ Result result = scanner.next();
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(2, result.size());
+ assertFalse(result.mayHaveMoreCellsInRow());
+ assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQS[0])));
+ assertEquals(2 * i, Bytes.toInt(result.getValue(FAMILY, CQS[1])));
+ }
+ assertNull(scanner.next());
+ }
+ }
+
+ @Test
+ public void testAllowPartial() throws IOException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(2)).setMaxResultSize(1)
+ .setAllowPartialResults(true).setLimit(limit);
+ try (Table table = UTIL.getConnection().getTable(TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ for (int i = 0; i < 2 * limit; i++) {
+ int key = i / 2;
+ Result result = scanner.next();
+ assertEquals(key, Bytes.toInt(result.getRow()));
+ assertEquals(1, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ int cqIndex = i % 2;
+ assertEquals(key * (cqIndex + 1), Bytes.toInt(result.getValue(FAMILY, CQS[cqIndex])));
+ }
+ assertNull(scanner.next());
+ }
+ }
+
+ @Test
+ public void testBatchAllowPartial() throws IOException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(3)).setBatch(2).setMaxResultSize(1)
+ .setAllowPartialResults(true).setLimit(limit);
+ try (Table table = UTIL.getConnection().getTable(TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ for (int i = 0; i < 3 * limit; i++) {
+ int key = i / 3;
+ Result result = scanner.next();
+ assertEquals(key, Bytes.toInt(result.getRow()));
+ assertEquals(1, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ int cqIndex = i % 3;
+ assertEquals(key * (cqIndex + 1), Bytes.toInt(result.getValue(FAMILY, CQS[cqIndex])));
+ }
+ assertNull(scanner.next());
+ }
+ }
+
+ @Test
+ public void testBatch() throws IOException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(2)).setBatch(2).setMaxResultSize(1)
+ .setLimit(limit);
+ try (Table table = UTIL.getConnection().getTable(TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ for (int i = 0; i < limit; i++) {
+ Result result = scanner.next();
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(2, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQS[0])));
+ assertEquals(2 * i, Bytes.toInt(result.getValue(FAMILY, CQS[1])));
+ }
+ assertNull(scanner.next());
+ }
+ }
+
+ @Test
+ public void testBatchAndFilterDiffer() throws IOException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(3)).setBatch(2).setMaxResultSize(1)
+ .setLimit(limit);
+ try (Table table = UTIL.getConnection().getTable(TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ for (int i = 0; i < limit; i++) {
+ Result result = scanner.next();
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(2, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQS[0])));
+ assertEquals(2 * i, Bytes.toInt(result.getValue(FAMILY, CQS[1])));
+ result = scanner.next();
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(1, result.size());
+ assertFalse(result.mayHaveMoreCellsInRow());
+ assertEquals(3 * i, Bytes.toInt(result.getValue(FAMILY, CQS[2])));
+ }
+ assertNull(scanner.next());
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c1f258/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java
new file mode 100644
index 0000000..f71561f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * With a filter we may stop in the middle of a row and think that we still have more cells
+ * for the current row, when in fact all the remaining cells will be filtered out. This leads
+ * to a Result whose mayHaveMoreCellsInRow is true although no more cells for that row will
+ * be returned. Here we test that our limited scan still works in this case.
+ */
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRawAsyncTableLimitedScanWithFilter {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static final TableName TABLE_NAME = TableName.valueOf("TestRegionScanner");
+
+ private static final byte[] FAMILY = Bytes.toBytes("cf");
+
+ private static final byte[][] CQS =
+ { Bytes.toBytes("cq1"), Bytes.toBytes("cq2"), Bytes.toBytes("cq3"), Bytes.toBytes("cq4") };
+
+ private static int ROW_COUNT = 10;
+
+ private static AsyncConnection CONN;
+
+ private static RawAsyncTable TABLE;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ UTIL.startMiniCluster(1);
+ UTIL.createTable(TABLE_NAME, FAMILY);
+ CONN = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get();
+ TABLE = CONN.getRawTable(TABLE_NAME);
+ TABLE.putAll(IntStream.range(0, ROW_COUNT).mapToObj(i -> {
+ Put put = new Put(Bytes.toBytes(i));
+ IntStream.range(0, CQS.length)
+ .forEach(j -> put.addColumn(FAMILY, CQS[j], Bytes.toBytes((j + 1) * i)));
+ return put;
+ }).collect(Collectors.toList())).get();
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ CONN.close();
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testCompleteResult() throws InterruptedException, ExecutionException {
+ int limit = 5;
+ Scan scan =
+ new Scan().setFilter(new ColumnCountOnRowFilter(2)).setMaxResultSize(1).setLimit(limit);
+ List<Result> results = TABLE.scanAll(scan).get();
+ assertEquals(limit, results.size());
+ IntStream.range(0, limit).forEach(i -> {
+ Result result = results.get(i);
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(2, result.size());
+ assertFalse(result.mayHaveMoreCellsInRow());
+ assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQS[0])));
+ assertEquals(2 * i, Bytes.toInt(result.getValue(FAMILY, CQS[1])));
+ });
+ }
+
+ @Test
+ public void testAllowPartial() throws InterruptedException, ExecutionException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(2)).setMaxResultSize(1)
+ .setAllowPartialResults(true).setLimit(limit);
+ List<Result> results = TABLE.scanAll(scan).get();
+ assertEquals(2 * limit, results.size());
+ IntStream.range(0, 2 * limit).forEach(i -> {
+ int key = i / 2;
+ Result result = results.get(i);
+ assertEquals(key, Bytes.toInt(result.getRow()));
+ assertEquals(1, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ int cqIndex = i % 2;
+ assertEquals(key * (cqIndex + 1), Bytes.toInt(result.getValue(FAMILY, CQS[cqIndex])));
+ });
+ }
+
+ @Test
+ public void testBatchAllowPartial() throws InterruptedException, ExecutionException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(3)).setBatch(2).setMaxResultSize(1)
+ .setAllowPartialResults(true).setLimit(limit);
+ List<Result> results = TABLE.scanAll(scan).get();
+ assertEquals(3 * limit, results.size());
+ IntStream.range(0, 3 * limit).forEach(i -> {
+ int key = i / 3;
+ Result result = results.get(i);
+ assertEquals(key, Bytes.toInt(result.getRow()));
+ assertEquals(1, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ int cqIndex = i % 3;
+ assertEquals(key * (cqIndex + 1), Bytes.toInt(result.getValue(FAMILY, CQS[cqIndex])));
+ });
+ }
+
+ @Test
+ public void testBatch() throws InterruptedException, ExecutionException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(2)).setBatch(2).setMaxResultSize(1)
+ .setLimit(limit);
+ List<Result> results = TABLE.scanAll(scan).get();
+ assertEquals(limit, results.size());
+ IntStream.range(0, limit).forEach(i -> {
+ Result result = results.get(i);
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(2, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQS[0])));
+ assertEquals(2 * i, Bytes.toInt(result.getValue(FAMILY, CQS[1])));
+ });
+ }
+
+ @Test
+ public void testBatchAndFilterDiffer() throws InterruptedException, ExecutionException {
+ int limit = 5;
+ Scan scan = new Scan().setFilter(new ColumnCountOnRowFilter(3)).setBatch(2).setMaxResultSize(1)
+ .setLimit(limit);
+ List<Result> results = TABLE.scanAll(scan).get();
+ assertEquals(2 * limit, results.size());
+ IntStream.range(0, limit).forEach(i -> {
+ Result result = results.get(2 * i);
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(2, result.size());
+ assertTrue(result.mayHaveMoreCellsInRow());
+ assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQS[0])));
+ assertEquals(2 * i, Bytes.toInt(result.getValue(FAMILY, CQS[1])));
+ result = results.get(2 * i + 1);
+ assertEquals(i, Bytes.toInt(result.getRow()));
+ assertEquals(1, result.size());
+ assertFalse(result.mayHaveMoreCellsInRow());
+ assertEquals(3 * i, Bytes.toInt(result.getValue(FAMILY, CQS[2])));
+ });
+ }
+}
[07/23] hbase git commit: HBASE-17812 Remove RpcConnection from pool
in AbstractRpcClient.cancelConnections
Posted by sy...@apache.org.
HBASE-17812 Remove RpcConnection from pool in AbstractRpcClient.cancelConnections
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc59fe4e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc59fe4e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc59fe4e
Branch: refs/heads/hbase-12439
Commit: cc59fe4e91ab0099f65566bc90e77e37f8147119
Parents: 7bb0624
Author: zhangduo <zh...@apache.org>
Authored: Mon Mar 20 21:06:06 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Mar 21 10:07:16 2017 +0800
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/cc59fe4e/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 4df6786..930f37a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -444,6 +444,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
&& remoteId.address.getHostName().equals(sn.getHostname())) {
LOG.info("The server on " + sn.toString() + " is dead - stopping the connection "
+ connection.remoteId);
+ connections.removeValue(remoteId, connection);
connection.shutdown();
}
}
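The one-line fix follows a common pool pattern: evict the entry before shutting the connection down, so a concurrent caller cannot check out a connection that is already being torn down. A generic sketch, where 'pool', 'isDeadServer' and the connection type are placeholders:

RpcConnection connection = pool.get(remoteId);
if (connection != null && isDeadServer(connection)) {
  pool.removeValue(remoteId, connection); // evict first...
  connection.shutdown();                  // ...then tear down
}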
[20/23] hbase git commit: Added Anastasia Braginsky to the pom.xml
Posted by sy...@apache.org.
Added Anastasia Braginsky to the pom.xml
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/04fc4550
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/04fc4550
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/04fc4550
Branch: refs/heads/hbase-12439
Commit: 04fc455037c3aa03e28c43aa2ca0668278501863
Parents: 4a076cd
Author: anastas <an...@yahoo-inc.com>
Authored: Mon Mar 27 15:21:54 2017 +0300
Committer: anastas <an...@yahoo-inc.com>
Committed: Mon Mar 27 15:21:54 2017 +0300
----------------------------------------------------------------------
pom.xml | 6 ++++++
1 file changed, 6 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/04fc4550/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 86c98a2..dcfd848 100644
--- a/pom.xml
+++ b/pom.xml
@@ -164,6 +164,12 @@
<timezone>-8</timezone>
</developer>
<developer>
+ <id>anastasia</id>
+ <name>Anastasia Braginsky</name>
+ <email>anastasia@apache.org</email>
+ <timezone>+2</timezone>
+ </developer>
+ <developer>
<id>apurtell</id>
<name>Andrew Purtell</name>
<email>apurtell@apache.org</email>
[17/23] hbase git commit: HBASE-17623 Reuse the bytes array when
building the hfile block
Posted by sy...@apache.org.
HBASE-17623 Reuse the bytes array when building the hfile block
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bd31090
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bd31090
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bd31090
Branch: refs/heads/hbase-12439
Commit: 6bd3109062060f735c73b268c44022c201e6072b
Parents: faf81d5
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Wed Mar 22 03:50:48 2017 +0800
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Sat Mar 25 23:49:32 2017 +0800
----------------------------------------------------------------------
.../HFileBlockDefaultEncodingContext.java | 44 +++-----
.../io/encoding/HFileBlockEncodingContext.java | 14 ++-
.../hadoop/hbase/io/hfile/HFileBlock.java | 109 ++++++++++---------
.../hadoop/hbase/io/hfile/TestHFileBlock.java | 4 +-
4 files changed, 87 insertions(+), 84 deletions(-)
----------------------------------------------------------------------
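The central idea: HBase's own ByteArrayOutputStream (org.apache.hadoop.hbase.io) exposes its internal array via getBuffer()/size(), so callers can avoid the allocation and copy that toByteArray() performs on every block. A minimal sketch, assuming hbase-common on the classpath:

import java.io.IOException;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;

static byte[] sketch(byte[] data) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
  baos.write(data, 0, data.length);
  byte[] copied = baos.toByteArray(); // allocates and copies on every call
  // getBuffer() returns the reused internal array; only [0, baos.size()) is
  // valid, and it is overwritten after reset(), so copy it if it must live on.
  return baos.getBuffer();
}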
http://git-wip-us.apache.org/repos/asf/hbase/blob/6bd31090/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
index c7821e3..1045f94 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.io.encoding;
import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.NONE;
import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.SecureRandom;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.io.TagCompressionContext;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Cipher;
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.Encryptor;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
@@ -48,7 +49,6 @@ import com.google.common.base.Preconditions;
@InterfaceAudience.Private
public class HFileBlockDefaultEncodingContext implements
HFileBlockEncodingContext {
- private byte[] onDiskBytesWithHeader;
private BlockType blockType;
private final DataBlockEncoding encodingAlgo;
@@ -128,17 +128,12 @@ public class HFileBlockDefaultEncodingContext implements
}
@Override
- public byte[] compressAndEncrypt(byte[] uncompressedBytesWithHeader) throws IOException {
- compressAfterEncoding(uncompressedBytesWithHeader, dummyHeader);
- return onDiskBytesWithHeader;
+ public Bytes compressAndEncrypt(byte[] data, int offset, int length) throws IOException {
+ return compressAfterEncoding(data, offset, length, dummyHeader);
}
- /**
- * @param uncompressedBytesWithHeader
- * @param headerBytes
- * @throws IOException
- */
- protected void compressAfterEncoding(byte[] uncompressedBytesWithHeader, byte[] headerBytes)
+ private Bytes compressAfterEncoding(byte[] uncompressedBytesWithHeaderBuffer,
+ int uncompressedBytesWithHeaderOffset, int uncompressedBytesWithHeaderLength, byte[] headerBytes)
throws IOException {
Encryption.Context cryptoContext = fileContext.getEncryptionContext();
if (cryptoContext != Encryption.Context.NONE) {
@@ -162,17 +157,17 @@ public class HFileBlockDefaultEncodingContext implements
if (fileContext.getCompression() != Compression.Algorithm.NONE) {
compressedByteStream.reset();
compressionStream.resetState();
- compressionStream.write(uncompressedBytesWithHeader,
- headerBytes.length, uncompressedBytesWithHeader.length - headerBytes.length);
+ compressionStream.write(uncompressedBytesWithHeaderBuffer,
+ headerBytes.length + uncompressedBytesWithHeaderOffset, uncompressedBytesWithHeaderLength - headerBytes.length);
compressionStream.flush();
compressionStream.finish();
byte[] plaintext = compressedByteStream.toByteArray();
plaintextLength = plaintext.length;
in = new ByteArrayInputStream(plaintext);
} else {
- plaintextLength = uncompressedBytesWithHeader.length - headerBytes.length;
- in = new ByteArrayInputStream(uncompressedBytesWithHeader,
- headerBytes.length, plaintextLength);
+ plaintextLength = uncompressedBytesWithHeaderLength - headerBytes.length;
+ in = new ByteArrayInputStream(uncompressedBytesWithHeaderBuffer,
+ headerBytes.length + uncompressedBytesWithHeaderOffset, plaintextLength);
}
if (plaintextLength > 0) {
@@ -194,16 +189,13 @@ public class HFileBlockDefaultEncodingContext implements
// Encrypt the data
Encryption.encrypt(cryptoByteStream, in, encryptor);
- onDiskBytesWithHeader = cryptoByteStream.toByteArray();
-
// Increment the IV given the final block size
- Encryption.incrementIv(iv, 1 + (onDiskBytesWithHeader.length / encryptor.getBlockSize()));
-
+ Encryption.incrementIv(iv, 1 + (cryptoByteStream.size() / encryptor.getBlockSize()));
+ return new Bytes(cryptoByteStream.getBuffer(), 0, cryptoByteStream.size());
} else {
cryptoByteStream.write(0);
- onDiskBytesWithHeader = cryptoByteStream.toByteArray();
-
+ return new Bytes(cryptoByteStream.getBuffer(), 0, cryptoByteStream.size());
}
} else {
@@ -212,14 +204,14 @@ public class HFileBlockDefaultEncodingContext implements
compressedByteStream.reset();
compressedByteStream.write(headerBytes);
compressionStream.resetState();
- compressionStream.write(uncompressedBytesWithHeader,
- headerBytes.length, uncompressedBytesWithHeader.length
+ compressionStream.write(uncompressedBytesWithHeaderBuffer,
+ headerBytes.length + uncompressedBytesWithHeaderOffset, uncompressedBytesWithHeaderLength
- headerBytes.length);
compressionStream.flush();
compressionStream.finish();
- onDiskBytesWithHeader = compressedByteStream.toByteArray();
+ return new Bytes(compressedByteStream.getBuffer(), 0, compressedByteStream.size());
} else {
- onDiskBytesWithHeader = uncompressedBytesWithHeader;
+ return null;
}
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6bd31090/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
index 9dc14a4..30c2a16 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.util.Bytes;
/**
* An encoding context that is created by a writer's encoder, and is shared
@@ -73,9 +74,14 @@ public interface HFileBlockEncodingContext {
EncodingState getEncodingState();
/**
- * @param uncompressedBytesWithHeader encoded bytes with header
- * @return Bytes with header which are ready to write out to disk. This is compressed and
- * encrypted bytes applying the set compression algorithm and encryption.
+ * @param data encoded bytes with header
+ * @param offset the offset in encoded data to start at
+ * @param length the number of encoded bytes
+ * @return Bytes with header which are ready to write out to disk.
+ * These are the compressed and encrypted bytes, produced by applying the
+ * configured compression algorithm and encryption. The underlying bytes may be
+ * changed, so if you need a Bytes reference for later use, clone the bytes first.
+ * Returns null if the data does not need to be compressed or encrypted.
*/
- byte[] compressAndEncrypt(byte[] uncompressedBytesWithHeader) throws IOException;
+ Bytes compressAndEncrypt(byte[] data, int offset, int length) throws IOException;
}
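The contract this sets up for callers, sketched below ('buf' and 'len' are placeholders; this mirrors the HFileBlock change later in this commit):

Bytes out = encodingCtx.compressAndEncrypt(buf, 0, len);
if (out == null) {
  // null means no compression or encryption applied; fall back to the raw bytes
  out = new Bytes(buf, 0, len);
}
// the returned buffer may be reused by the context, so clone before retaining
byte[] kept = Arrays.copyOfRange(out.get(), out.getOffset(),
    out.getOffset() + out.getLength());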
http://git-wip-us.apache.org/repos/asf/hbase/blob/6bd31090/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 4711cec..066a9fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -326,7 +326,7 @@ public class HFileBlock implements Cacheable {
/**
* Creates a new {@link HFile} block from the given fields. This constructor
- * is used when the block data has already been read and uncompressed,
+ * is used only while writing blocks and caching, when the block data is ready
* and is sitting in a byte buffer and we want to stuff the block into cache.
* See {@link Writer#getBlockForCaching(CacheConfig)}.
*
@@ -338,8 +338,7 @@ public class HFileBlock implements Cacheable {
* @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
* @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
* @param prevBlockOffset see {@link #prevBlockOffset}
- * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) followed by
- * uncompressed data.
+ * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
* @param fillHeader when true, write the first 4 header fields into passed buffer.
* @param offset the file offset the block was read from
* @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
@@ -877,7 +876,7 @@ public class HFileBlock implements Cacheable {
* if compression is turned on. It also includes the checksum data that
* immediately follows the block data. (header + data + checksums)
*/
- private byte[] onDiskBlockBytesWithHeader;
+ private ByteArrayOutputStream onDiskBlockBytesWithHeader;
/**
* The size of the checksum data on disk. It is used only if data is
@@ -888,15 +887,6 @@ public class HFileBlock implements Cacheable {
private byte[] onDiskChecksum = HConstants.EMPTY_BYTE_ARRAY;
/**
- * Valid in the READY state. Contains the header and the uncompressed (but
- * potentially encoded, if this is a data block) bytes, so the length is
- * {@link #uncompressedSizeWithoutHeader} +
- * {@link org.apache.hadoop.hbase.HConstants#HFILEBLOCK_HEADER_SIZE}.
- * Does not store checksums.
- */
- private byte[] uncompressedBlockBytesWithHeader;
-
- /**
* Current block's start offset in the {@link HFile}. Set in
* {@link #writeHeaderAndData(FSDataOutputStream)}.
*/
@@ -1023,42 +1013,42 @@ public class HFileBlock implements Cacheable {
blockType = dataBlockEncodingCtx.getBlockType();
}
userDataStream.flush();
- // This does an array copy, so it is safe to cache this byte array when cache-on-write.
- // Header is still the empty, 'dummy' header that is yet to be filled out.
- uncompressedBlockBytesWithHeader = baosInMemory.toByteArray();
prevOffset = prevOffsetByType[blockType.getId()];
// We need to set state before we can package the block up for cache-on-write. In a way, the
// block is ready, but not yet encoded or compressed.
state = State.BLOCK_READY;
+ Bytes compressAndEncryptData;
if (blockType == BlockType.DATA || blockType == BlockType.ENCODED_DATA) {
- onDiskBlockBytesWithHeader = dataBlockEncodingCtx.
- compressAndEncrypt(uncompressedBlockBytesWithHeader);
+ compressAndEncryptData = dataBlockEncodingCtx.
+ compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size());
} else {
- onDiskBlockBytesWithHeader = defaultBlockEncodingCtx.
- compressAndEncrypt(uncompressedBlockBytesWithHeader);
+ compressAndEncryptData = defaultBlockEncodingCtx.
+ compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size());
}
+ if (compressAndEncryptData == null) {
+ compressAndEncryptData = new Bytes(baosInMemory.getBuffer(), 0, baosInMemory.size());
+ }
+ if (onDiskBlockBytesWithHeader == null) {
+ onDiskBlockBytesWithHeader = new ByteArrayOutputStream(compressAndEncryptData.getLength());
+ }
+ onDiskBlockBytesWithHeader.reset();
+ onDiskBlockBytesWithHeader.write(compressAndEncryptData.get(),
+ compressAndEncryptData.getOffset(), compressAndEncryptData.getLength());
// Calculate how many bytes we need for checksum on the tail of the block.
int numBytes = (int) ChecksumUtil.numBytes(
- onDiskBlockBytesWithHeader.length,
+ onDiskBlockBytesWithHeader.size(),
fileContext.getBytesPerChecksum());
// Put the header for the on disk bytes; header currently is unfilled-out
- putHeader(onDiskBlockBytesWithHeader, 0,
- onDiskBlockBytesWithHeader.length + numBytes,
- uncompressedBlockBytesWithHeader.length, onDiskBlockBytesWithHeader.length);
- // Set the header for the uncompressed bytes (for cache-on-write) -- IFF different from
- // onDiskBlockBytesWithHeader array.
- if (onDiskBlockBytesWithHeader != uncompressedBlockBytesWithHeader) {
- putHeader(uncompressedBlockBytesWithHeader, 0,
- onDiskBlockBytesWithHeader.length + numBytes,
- uncompressedBlockBytesWithHeader.length, onDiskBlockBytesWithHeader.length);
- }
+ putHeader(onDiskBlockBytesWithHeader,
+ onDiskBlockBytesWithHeader.size() + numBytes,
+ baosInMemory.size(), onDiskBlockBytesWithHeader.size());
if (onDiskChecksum.length != numBytes) {
onDiskChecksum = new byte[numBytes];
}
ChecksumUtil.generateChecksums(
- onDiskBlockBytesWithHeader, 0, onDiskBlockBytesWithHeader.length,
+ onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size(),
onDiskChecksum, 0, fileContext.getChecksumType(), fileContext.getBytesPerChecksum());
}
@@ -1081,6 +1071,11 @@ public class HFileBlock implements Cacheable {
Bytes.putInt(dest, offset, onDiskDataSize);
}
+ private void putHeader(ByteArrayOutputStream dest, int onDiskSize,
+ int uncompressedSize, int onDiskDataSize) {
+ putHeader(dest.getBuffer(), 0, onDiskSize, uncompressedSize, onDiskDataSize);
+ }
+
/**
* Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records
* the offset of this block so that it can be referenced in the next block
@@ -1113,7 +1108,7 @@ public class HFileBlock implements Cacheable {
protected void finishBlockAndWriteHeaderAndData(DataOutputStream out)
throws IOException {
ensureBlockReady();
- out.write(onDiskBlockBytesWithHeader);
+ out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size());
out.write(onDiskChecksum);
}
@@ -1132,12 +1127,12 @@ public class HFileBlock implements Cacheable {
// This is not very optimal, because we are doing an extra copy.
// But this method is used only by unit tests.
byte[] output =
- new byte[onDiskBlockBytesWithHeader.length
+ new byte[onDiskBlockBytesWithHeader.size()
+ onDiskChecksum.length];
- System.arraycopy(onDiskBlockBytesWithHeader, 0, output, 0,
- onDiskBlockBytesWithHeader.length);
+ System.arraycopy(onDiskBlockBytesWithHeader.getBuffer(), 0, output, 0,
+ onDiskBlockBytesWithHeader.size());
System.arraycopy(onDiskChecksum, 0, output,
- onDiskBlockBytesWithHeader.length, onDiskChecksum.length);
+ onDiskBlockBytesWithHeader.size(), onDiskChecksum.length);
return output;
}
@@ -1165,7 +1160,7 @@ public class HFileBlock implements Cacheable {
*/
int getOnDiskSizeWithoutHeader() {
expectState(State.BLOCK_READY);
- return onDiskBlockBytesWithHeader.length +
+ return onDiskBlockBytesWithHeader.size() +
onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE;
}
@@ -1178,7 +1173,7 @@ public class HFileBlock implements Cacheable {
*/
int getOnDiskSizeWithHeader() {
expectState(State.BLOCK_READY);
- return onDiskBlockBytesWithHeader.length + onDiskChecksum.length;
+ return onDiskBlockBytesWithHeader.size() + onDiskChecksum.length;
}
/**
@@ -1186,7 +1181,7 @@ public class HFileBlock implements Cacheable {
*/
int getUncompressedSizeWithoutHeader() {
expectState(State.BLOCK_READY);
- return uncompressedBlockBytesWithHeader.length - HConstants.HFILEBLOCK_HEADER_SIZE;
+ return baosInMemory.size() - HConstants.HFILEBLOCK_HEADER_SIZE;
}
/**
@@ -1194,7 +1189,7 @@ public class HFileBlock implements Cacheable {
*/
int getUncompressedSizeWithHeader() {
expectState(State.BLOCK_READY);
- return uncompressedBlockBytesWithHeader.length;
+ return baosInMemory.size();
}
/** @return true if a block is being written */
@@ -1215,29 +1210,37 @@ public class HFileBlock implements Cacheable {
}
/**
- * Returns the header followed by the uncompressed data, even if using
+ * Clones the header followed by the uncompressed data, even if using
* compression. This is needed for storing uncompressed blocks in the block
* cache. Can be called in the "writing" state or the "block ready" state.
* Returns only the header and data, does not include checksum data.
*
- * @return uncompressed block bytes for caching on write
+ * @return a copy of the uncompressed block bytes for caching on write
*/
- ByteBuffer getUncompressedBufferWithHeader() {
+ @VisibleForTesting
+ ByteBuffer cloneUncompressedBufferWithHeader() {
expectState(State.BLOCK_READY);
+ byte[] uncompressedBlockBytesWithHeader = baosInMemory.toByteArray();
+ int numBytes = (int) ChecksumUtil.numBytes(
+ onDiskBlockBytesWithHeader.size(),
+ fileContext.getBytesPerChecksum());
+ putHeader(uncompressedBlockBytesWithHeader, 0,
+ onDiskBlockBytesWithHeader.size() + numBytes,
+ baosInMemory.size(), onDiskBlockBytesWithHeader.size());
return ByteBuffer.wrap(uncompressedBlockBytesWithHeader);
}
/**
- * Returns the header followed by the on-disk (compressed/encoded/encrypted) data. This is
+ * Clones the header followed by the on-disk (compressed/encoded/encrypted) data. This is
* needed for storing packed blocks in the block cache. Expects calling semantics identical to
* {@link #cloneUncompressedBufferWithHeader()}. Returns only the header and data,
* Does not include checksum data.
*
- * @return packed block bytes for caching on write
+ * @return a copy of the on-disk block bytes for caching on write
*/
- ByteBuffer getOnDiskBufferWithHeader() {
+ private ByteBuffer cloneOnDiskBufferWithHeader() {
expectState(State.BLOCK_READY);
- return ByteBuffer.wrap(onDiskBlockBytesWithHeader);
+ return ByteBuffer.wrap(onDiskBlockBytesWithHeader.toByteArray());
}
private void expectState(State expectedState) {
@@ -1268,7 +1271,9 @@ public class HFileBlock implements Cacheable {
* the byte buffer passed into the constructor of this newly created
* block does not have checksum data even though the header minor
* version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a
- * 0 value in bytesPerChecksum.
+ * 0 value in bytesPerChecksum. This method copies the on-disk or
+ * uncompressed data to build the HFileBlock which is used only
+ * while writing blocks and caching.
*
* <p>TODO: Should there be an option where a cache can ask that hbase preserve block
* checksums for checking after a block comes out of the cache? Otehrwise, cache is responsible
@@ -1289,10 +1294,10 @@ public class HFileBlock implements Cacheable {
return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
getUncompressedSizeWithoutHeader(), prevOffset,
cacheConf.shouldCacheCompressed(blockType.getCategory())?
- getOnDiskBufferWithHeader() :
- getUncompressedBufferWithHeader(),
+ cloneOnDiskBufferWithHeader() :
+ cloneUncompressedBufferWithHeader(),
FILL_HEADER, startOffset, UNSET,
- onDiskBlockBytesWithHeader.length + onDiskChecksum.length, newContext);
+ onDiskBlockBytesWithHeader.size() + onDiskChecksum.length, newContext);
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6bd31090/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index 1c87af4..68c4587 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -390,7 +390,7 @@ public class TestHFileBlock {
writeTestKeyValues(hbw, blockId, includesMemstoreTS, includesTag);
hbw.writeHeaderAndData(os);
int headerLen = HConstants.HFILEBLOCK_HEADER_SIZE;
- byte[] encodedResultWithHeader = hbw.getUncompressedBufferWithHeader().array();
+ byte[] encodedResultWithHeader = hbw.cloneUncompressedBufferWithHeader().array();
final int encodedSize = encodedResultWithHeader.length - headerLen;
if (encoding != DataBlockEncoding.NONE) {
// We need to account for the two-byte encoding algorithm ID that
@@ -798,7 +798,7 @@ public class TestHFileBlock {
totalSize += hbw.getOnDiskSizeWithHeader();
if (cacheOnWrite)
- expectedContents.add(hbw.getUncompressedBufferWithHeader());
+ expectedContents.add(hbw.cloneUncompressedBufferWithHeader());
if (detailedLogging) {
LOG.info("Written block #" + i + " of type " + bt
[10/23] hbase git commit: Update home page to say hbasecon2017 is on
google campus in MTV, not SF
Posted by sy...@apache.org.
Update home page to say hbasecon2017 is on google campus in MTV, not SF
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/11dc5bf6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/11dc5bf6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/11dc5bf6
Branch: refs/heads/hbase-12439
Commit: 11dc5bf6715a1cd8fe191cfcb299688af24865f8
Parents: 1cfd22b
Author: Michael Stack <st...@apache.org>
Authored: Tue Mar 21 14:01:01 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Tue Mar 21 14:01:06 2017 -0700
----------------------------------------------------------------------
src/main/site/xdoc/index.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/11dc5bf6/src/main/site/xdoc/index.xml
----------------------------------------------------------------------
diff --git a/src/main/site/xdoc/index.xml b/src/main/site/xdoc/index.xml
index 49b9c0d..83c9f01 100644
--- a/src/main/site/xdoc/index.xml
+++ b/src/main/site/xdoc/index.xml
@@ -83,7 +83,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
</section>
<section name="News">
- <p>June 12th, 2017 <a href="https://easychair.org/cfp/hbasecon2017">HBaseCon2017</a> in San Francisco</p>
+ <p>June 12th, 2017 <a href="https://easychair.org/cfp/hbasecon2017">HBaseCon2017</a> at the Crittenden Buildings on the Google Mountain View Campus</p>
<p>December 8th, 2016 <a href="https://www.meetup.com/hbaseusergroup/events/235542241/">Meetup@Splice</a> in San Francisco</p>
<p>September 26th, 2016 <a href="http://www.meetup.com/HBase-NYC/events/233024937/">HBaseConEast2016</a> at Google in Chelsea, NYC</p>
<p>May 24th, 2016 <a href="http://www.hbasecon.com/">HBaseCon2016</a> at The Village, 969 Market, San Francisco</p>
[14/23] hbase git commit: HBASE-17809 cleanup unused class
Posted by sy...@apache.org.
HBASE-17809 cleanup unused class
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fe3c32eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fe3c32eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fe3c32eb
Branch: refs/heads/hbase-12439
Commit: fe3c32ebd56da9c7852c70951cb9f8fadfdf7719
Parents: f2d1b8d
Author: CHIA-PING TSAI <ch...@gmail.com>
Authored: Mon Mar 20 17:40:28 2017 +0800
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Thu Mar 23 14:15:28 2017 +0800
----------------------------------------------------------------------
dev-support/findbugs-exclude.xml | 8 -
.../client/DelegatingRetryingCallable.java | 64 -
.../hbase/client/ScannerTimeoutException.java | 44 -
.../hbase/exceptions/LockTimeoutException.java | 43 -
.../exceptions/OperationConflictException.java | 49 -
.../quotas/InvalidQuotaSettingsException.java | 32 -
.../apache/hadoop/hbase/ShareableMemory.java | 39 -
.../hadoop/hbase/util/BoundedArrayQueue.java | 81 --
.../hadoop/hbase/util/ChecksumFactory.java | 99 --
.../hbase/util/TestBoundedArrayQueue.java | 60 -
.../encode/tokenize/TokenDepthComparator.java | 64 -
.../hadoop/hbase/regionserver/LruHashMap.java | 1102 ------------------
.../regionserver/RegionMergeTransaction.java | 248 ----
.../org/apache/hadoop/hbase/util/MetaUtils.java | 155 ---
.../hadoop/hbase/util/SortedCopyOnWriteSet.java | 177 ---
.../apache/hadoop/hbase/io/TestHeapSize.java | 2 +-
.../master/TestDistributedLogSplitting.java | 3 +-
.../hadoop/hbase/util/MultiThreadedUpdater.java | 3 +-
.../hbase/util/TestSortedCopyOnWriteSet.java | 106 --
19 files changed, 3 insertions(+), 2376 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index 37d5746..3162cb2 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -66,14 +66,6 @@
</Match>
<Match>
- <Class name="org.apache.hadoop.hbase.regionserver.LruHashMap"/>
- <Or>
- <Method name="equals"/>
- </Or>
- <Bug pattern="EQ_UNUSUAL"/>
- </Match>
-
- <Match>
<Class name="org.apache.hadoop.hbase.util.ByteBufferUtils"/>
<Or>
<Method name="putInt"/>
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
deleted file mode 100644
index b7d77f3..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Helper callable for internal use when you just want to override a single method of a {@link
- * RetryingCallable}. By default, this just delegates all {@link RetryingCallable} methods to the
- * specified delegate.
- * @param <T> Result class from calls to the delegate {@link RetryingCallable}
- * @param <D> Type of the delegate class
- */
-@InterfaceAudience.Private
-public class DelegatingRetryingCallable<T, D extends RetryingCallable<T>> implements
- RetryingCallable<T> {
- protected final D delegate;
-
- public DelegatingRetryingCallable(D delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public T call(int callTimeout) throws Exception {
- return delegate.call(callTimeout);
- }
-
- @Override
- public void prepare(boolean reload) throws IOException {
- delegate.prepare(reload);
- }
-
- @Override
- public void throwable(Throwable t, boolean retrying) {
- delegate.throwable(t, retrying);
- }
-
- @Override
- public String getExceptionMessageAdditionalDetail() {
- return delegate.getExceptionMessageAdditionalDetail();
- }
-
- @Override
- public long sleep(long pause, int tries) {
- return delegate.sleep(pause, tries);
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
deleted file mode 100644
index 9e0827c..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * Thrown when a scanner has timed out.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class ScannerTimeoutException extends DoNotRetryIOException {
-
- private static final long serialVersionUID = 8788838690290688313L;
-
- /** default constructor */
- ScannerTimeoutException() {
- super();
- }
-
- /** @param s the detail message */
- ScannerTimeoutException(String s) {
- super(s);
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
deleted file mode 100644
index b6b3c32..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.exceptions;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-
-/**
- * Thrown when there is a timeout when trying to acquire a lock
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class LockTimeoutException extends DoNotRetryIOException {
-
- private static final long serialVersionUID = -1770764924258999825L;
-
- /** Default constructor */
- public LockTimeoutException() {
- super();
- }
-
- public LockTimeoutException(String s) {
- super(s);
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java
deleted file mode 100644
index c40b8d9..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/OperationConflictException.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.exceptions;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * The exception that is thrown on duplicate execution of a non-idempotent operation.
- * The client should not retry; it may use "get" to retrieve the desired value.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class OperationConflictException extends DoNotRetryIOException {
- private static final long serialVersionUID = -8930333627489862872L;
-
- public OperationConflictException() {
- super();
- }
-
- public OperationConflictException(String message) {
- super(message);
- }
-
- public OperationConflictException(Throwable cause) {
- super(cause);
- }
-
- public OperationConflictException(String message, Throwable cause) {
- super(message, cause);
- }
-}
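The javadoc above describes the intended client contract: on OperationConflictException the operation already executed once, so the client should read the value back rather than retry. A hedged sketch of that pattern, assuming a standard Table handle (the method and variable names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.exceptions.OperationConflictException;

public class ConflictHandlingExample {
  // Applies the increment at most once; on duplicate execution, reads the
  // current value back instead of retrying the non-idempotent operation.
  static Result incrementOnce(Table table, byte[] row, Increment increment)
      throws IOException {
    try {
      return table.increment(increment);
    } catch (OperationConflictException e) {
      // Duplicate execution detected: do not retry, fetch the result instead.
      return table.get(new Get(row));
    }
  }
}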
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
deleted file mode 100644
index 54a1545..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.quotas;
-
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Generic exception thrown for invalid quota settings
- */
-@InterfaceAudience.Private
-public class InvalidQuotaSettingsException extends DoNotRetryIOException {
- public InvalidQuotaSettingsException(String msg) {
- super(msg);
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java
deleted file mode 100644
index 6a6ae59..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ShareableMemory.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * A cell implementing this interface indicates that its backing memory may be part of a larger
- * common memory area used by the RegionServer, such as the bigger memory chunk into which RPC
- * requests are read. If an exclusive instance is required, use {@link #cloneToCell()} to have the
- * contents of the cell copied to an exclusive memory area.
- */
-@InterfaceAudience.Private
-public interface ShareableMemory {
-
- /**
- * Does a deep copy of the contents to a new memory area and returns it in the form of a cell.
- * @return The deep cloned cell
- */
- Cell cloneToCell();
-}
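The interface removed above carried a simple contract: if a Cell's backing memory may be shared (for example, an RPC read buffer), callers that want to retain the cell should copy it out first. A minimal sketch of that check (the helper name is illustrative):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ShareableMemory;

public class ShareableMemoryExample {
  // Returns a cell that is safe to retain: shared-memory cells are deep
  // copied via cloneToCell(), exclusively backed cells are returned as-is.
  static Cell toExclusive(Cell cell) {
    if (cell instanceof ShareableMemory) {
      return ((ShareableMemory) cell).cloneToCell();
    }
    return cell;
  }
}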
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java
deleted file mode 100644
index 9db4c5c..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedArrayQueue.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.util.AbstractQueue;
-import java.util.Iterator;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * A bounded, non-thread-safe implementation of {@link java.util.Queue}.
- */
-@InterfaceAudience.Private
-public class BoundedArrayQueue<E> extends AbstractQueue<E> {
-
- private Object[] items;
- private int takeIndex, putIndex;
- private int count;
-
- public BoundedArrayQueue(int maxElements) {
- items = new Object[maxElements];
- }
-
- @Override
- public int size() {
- return count;
- }
-
- /**
- * Not implemented and will throw {@link UnsupportedOperationException}
- */
- @Override
- public Iterator<E> iterator() {
- // We don't need this. Leaving it as not implemented.
- throw new UnsupportedOperationException();
- }
-
- @Override
- public boolean offer(E e) {
- if (count == items.length) return false;
- items[putIndex] = e;
- if (++putIndex == items.length) putIndex = 0;
- count++;
- return true;
- }
-
- @Override
- public E poll() {
- return (count == 0) ? null : dequeue();
- }
-
- @SuppressWarnings("unchecked")
- private E dequeue() {
- E x = (E) items[takeIndex];
- items[takeIndex] = null;
- if (++takeIndex == items.length) takeIndex = 0;
- count--;
- return x;
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public E peek() {
- return (E) items[takeIndex];
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
deleted file mode 100644
index 414832d..0000000
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.zip.Checksum;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Utility class used to create a Checksum object.
- * The Checksum implementation is pluggable, and an application
- * can specify its own class implementing its own
- * checksum algorithm.
- */
-@InterfaceAudience.Private
-public class ChecksumFactory {
-
- static private final Class<?>[] EMPTY_ARRAY = new Class[]{};
-
- /**
- * Create a new instance of a Checksum object.
- * @param className classname of the Checksum implementation
- * @return The newly created Checksum object
- * @throws IOException if the named class cannot be found
- */
- static public Checksum newInstance(String className) throws IOException {
- try {
- Class<?> clazz = getClassByName(className);
- return (Checksum)newInstance(clazz);
- } catch (ClassNotFoundException e) {
- throw new IOException(e);
- }
- }
-
- /**
- * Returns a Constructor that can be used to create a Checksum object.
- * @param className classname for which a constructor is created
- * @return a new Constructor object
- */
- static public Constructor<?> newConstructor(String className)
- throws IOException {
- try {
- Class<?> clazz = getClassByName(className);
- Constructor<?> ctor = clazz.getDeclaredConstructor(EMPTY_ARRAY);
- ctor.setAccessible(true);
- return ctor;
- } catch (ClassNotFoundException e) {
- throw new IOException(e);
- } catch (java.lang.NoSuchMethodException e) {
- throw new IOException(e);
- }
- }
-
- /** Create an object of the given class using its no-argument constructor
- *
- * @param theClass class of which an object is created
- * @return a new object
- */
- static private <T> T newInstance(Class<T> theClass) {
- T result;
- try {
- Constructor<T> ctor = theClass.getDeclaredConstructor(EMPTY_ARRAY);
- ctor.setAccessible(true);
- result = ctor.newInstance();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- return result;
- }
-
- /**
- * Load a class by name.
- * @param name the class name.
- * @return the class object.
- * @throws ClassNotFoundException if the class is not found.
- */
- static private Class<?> getClassByName(String name)
- throws ClassNotFoundException {
- ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
- return Class.forName(name, true, classLoader);
- }
-}
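The factory removed above instantiated Checksum implementations reflectively by class name, so any class with an accessible no-argument constructor could be plugged in. A small usage sketch using the JDK's CRC32 (the example class and output are illustrative):

import java.io.IOException;
import java.util.zip.Checksum;
import org.apache.hadoop.hbase.util.ChecksumFactory;

public class ChecksumFactoryExample {
  public static void main(String[] args) throws IOException {
    // Instantiate a pluggable checksum implementation by class name.
    Checksum crc = ChecksumFactory.newInstance("java.util.zip.CRC32");
    byte[] data = "hello".getBytes();
    crc.update(data, 0, data.length);
    System.out.println("CRC32 = " + crc.getValue());
  }
}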
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
deleted file mode 100644
index 6d9c496..0000000
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ MiscTests.class, SmallTests.class })
-public class TestBoundedArrayQueue {
-
- private int qMaxElements = 5;
- private BoundedArrayQueue<Integer> queue = new BoundedArrayQueue<>(qMaxElements);
-
- @Test
- public void testBoundedArrayQueueOperations() throws Exception {
- assertEquals(0, queue.size());
- assertNull(queue.poll());
- assertNull(queue.peek());
- for(int i=0;i<qMaxElements;i++){
- assertTrue(queue.offer(i));
- }
- assertEquals(qMaxElements, queue.size());
- assertFalse(queue.offer(0));
- assertEquals(0, queue.peek().intValue());
- assertEquals(0, queue.peek().intValue());
- for (int i = 0; i < qMaxElements; i++) {
- assertEquals(i, queue.poll().intValue());
- }
- assertEquals(0, queue.size());
- assertNull(queue.poll());
- // Write after one cycle is over
- assertTrue(queue.offer(100));
- assertTrue(queue.offer(1000));
- assertEquals(100, queue.peek().intValue());
- assertEquals(100, queue.poll().intValue());
- assertEquals(1000, queue.poll().intValue());
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java
deleted file mode 100644
index 6ebf20b..0000000
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenDepthComparator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize;
-
-import java.util.Comparator;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Determines the order of nodes in the output array. It may be possible to optimize further.
- */
-@InterfaceAudience.Private
-public class TokenDepthComparator implements Comparator<TokenizerNode> {
-
- @Override
- public int compare(TokenizerNode a, TokenizerNode b) {
- if(a==null){
- throw new IllegalArgumentException("a cannot be null");
- }
- if(b==null){
- throw new IllegalArgumentException("b cannot be null");
- }
-
- // put leaves at the end
- if (!a.isLeaf() && b.isLeaf()) {
- return -1;
- }
- if (a.isLeaf() && !b.isLeaf()) {
- return 1;
- }
-
- if (a.isLeaf() && b.isLeaf()) { // keep leaves in sorted order (for debuggability)
- return a.getId() < b.getId() ? -1 : 1;
- }
-
- // compare depth
- if (a.getTokenOffset() < b.getTokenOffset()) {
- return -1;
- }
- if (a.getTokenOffset() > b.getTokenOffset()) {
- return 1;
- }
-
- // if same depth, return lower id first. ids are unique
- return a.getId() < b.getId() ? -1 : 1;
- }
-
-}
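The comparator removed above sorts inner nodes before leaves, shallower token offsets before deeper ones, and breaks ties on the unique node id. A one-line usage sketch (the wrapper class and list variable are illustrative):

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenDepthComparator;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode;

public class TokenOrderingExample {
  // Sorts tokenizer nodes into output order: inner nodes first, then leaves,
  // each group ordered by token depth and finally by id.
  static void sortForOutput(List<TokenizerNode> nodes) {
    Collections.sort(nodes, new TokenDepthComparator());
  }
}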
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
deleted file mode 100644
index a339abf..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
+++ /dev/null
@@ -1,1102 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * The LruHashMap is a memory-aware HashMap with a configurable maximum
- * memory footprint.
- * <p>
- * It maintains an ordered list of all entries in the map, ordered by
- * access time. When space needs to be freed because the maximum has been
- * reached, or the application has asked to free memory, entries will be
- * evicted according to an LRU (least-recently-used) algorithm. That is,
- * those entries which have gone the longest without being accessed will be
- * evicted first.
- * <p>
- * Both the Key and Value Objects used for this class must extend
- * <code>HeapSize</code> in order to track heap usage.
- * <p>
- * This class contains internal synchronization and is thread-safe.
- */
-@InterfaceAudience.Private
-public class LruHashMap<K extends HeapSize, V extends HeapSize>
-implements HeapSize, Map<K,V> {
-
- private static final Log LOG = LogFactory.getLog(LruHashMap.class);
-
- /** The default size (in bytes) of the LRU */
- private static final long DEFAULT_MAX_MEM_USAGE = 50000;
- /** The default capacity of the hash table */
- private static final int DEFAULT_INITIAL_CAPACITY = 16;
- /** The maximum capacity of the hash table */
- private static final int MAXIMUM_CAPACITY = 1 << 30;
- /** The default load factor to use */
- private static final float DEFAULT_LOAD_FACTOR = 0.75f;
-
- /** Memory overhead of this Object (for HeapSize) */
- private static final int OVERHEAD = 5 * Bytes.SIZEOF_LONG +
- 2 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_FLOAT + 3 * ClassSize.REFERENCE +
- 1 * ClassSize.ARRAY;
-
- /** Load factor allowed (usually 75%) */
- private final float loadFactor;
- /** Number of key/vals in the map */
- private int size;
- /** Size at which we grow hash */
- private int threshold;
- /** Entries in the map */
- private Entry [] entries;
-
- /** Pointer to least recently used entry */
- private Entry<K,V> headPtr;
- /** Pointer to most recently used entry */
- private Entry<K,V> tailPtr;
-
- /** Maximum memory usage of this map */
- private long memTotal = 0;
- /** Amount of available memory */
- private long memFree = 0;
-
- /** Number of successful (found) get() calls */
- private long hitCount = 0;
- /** Number of unsuccessful (not found) get() calls */
- private long missCount = 0;
-
- /**
- * Constructs a new, empty map with the specified initial capacity,
- * load factor, and maximum memory usage.
- *
- * @param initialCapacity the initial capacity
- * @param loadFactor the load factor
- * @param maxMemUsage the maximum total memory usage
- * @throws IllegalArgumentException if the initial capacity is less than one
- * @throws IllegalArgumentException if the initial capacity is greater than
- * the maximum capacity
- * @throws IllegalArgumentException if the load factor is <= 0
- * @throws IllegalArgumentException if the max memory usage is too small
- * to support the base overhead
- */
- public LruHashMap(int initialCapacity, float loadFactor,
- long maxMemUsage) {
- if (initialCapacity < 1) {
- throw new IllegalArgumentException("Initial capacity must be > 0");
- }
- if (initialCapacity > MAXIMUM_CAPACITY) {
- throw new IllegalArgumentException("Initial capacity is too large");
- }
- if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
- throw new IllegalArgumentException("Load factor must be > 0");
- }
- if (maxMemUsage <= (OVERHEAD + initialCapacity * ClassSize.REFERENCE)) {
- throw new IllegalArgumentException("Max memory usage too small to " +
- "support base overhead");
- }
-
- // Find a power of 2 >= initialCapacity
- int capacity = calculateCapacity(initialCapacity);
- this.loadFactor = loadFactor;
- this.threshold = calculateThreshold(capacity,loadFactor);
- this.entries = new Entry[capacity];
- this.memFree = maxMemUsage;
- this.memTotal = maxMemUsage;
- init();
- }
-
- /**
- * Constructs a new, empty map with the specified initial capacity and
- * load factor, and default maximum memory usage.
- *
- * @param initialCapacity the initial capacity
- * @param loadFactor the load factor
- * @throws IllegalArgumentException if the initial capacity is less than one
- * @throws IllegalArgumentException if the initial capacity is greater than
- * the maximum capacity
- * @throws IllegalArgumentException if the load factor is <= 0
- */
- public LruHashMap(int initialCapacity, float loadFactor) {
- this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE);
- }
-
- /**
- * Constructs a new, empty map with the specified initial capacity and
- * with the default load factor and maximum memory usage.
- *
- * @param initialCapacity the initial capacity
- * @throws IllegalArgumentException if the initial capacity is less than one
- * @throws IllegalArgumentException if the initial capacity is greater than
- * the maximum capacity
- */
- public LruHashMap(int initialCapacity) {
- this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_MAX_MEM_USAGE);
- }
-
- /**
- * Constructs a new, empty map with the specified maximum memory usage
- * and with default initial capacity and load factor.
- *
- * @param maxMemUsage the maximum total memory usage
- * @throws IllegalArgumentException if the max memory usage is too small
- * to support the base overhead
- */
- public LruHashMap(long maxMemUsage) {
- this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR,
- maxMemUsage);
- }
-
- /**
- * Constructs a new, empty map with the default initial capacity,
- * load factor and maximum memory usage.
- */
- public LruHashMap() {
- this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR,
- DEFAULT_MAX_MEM_USAGE);
- }
-
- //--------------------------------------------------------------------------
- /**
- * Get the currently available memory for this LRU in bytes.
- * This is (maxAllowed - currentlyUsed).
- *
- * @return currently available bytes
- */
- public synchronized long getMemFree() {
- return memFree;
- }
-
- /**
- * Get the maximum memory allowed for this LRU in bytes.
- *
- * @return maximum allowed bytes
- */
- public long getMemMax() {
- return memTotal;
- }
-
- /**
- * Get the currently used memory for this LRU in bytes.
- *
- * @return currently used memory in bytes
- */
- public long getMemUsed() {
- return (memTotal - getMemFree()); // FindBugs IS2_INCONSISTENT_SYNC
- }
-
- /**
- * Get the number of hits to the map. This is the number of times
- * a call to get() returns a matched key.
- *
- * @return number of hits
- */
- public long getHitCount() {
- return hitCount;
- }
-
- /**
- * Get the number of misses to the map. This is the number of times
- * a call to get() returns null.
- *
- * @return number of misses
- */
- public synchronized long getMissCount() {
- return missCount; // FindBugs IS2_INCONSISTENT_SYNC
- }
-
- /**
- * Get the hit ratio. This is the number of hits divided by the
- * total number of requests.
- *
- * @return hit ratio (double between 0 and 1)
- */
- public double getHitRatio() {
- return ((double) hitCount) / (hitCount + getMissCount());
- }
-
- /**
- * Free the requested amount of memory from the LRU map.
- *
- * This will do LRU eviction from the map until at least as much
- * memory as requested is freed. This does not affect the maximum
- * memory usage parameter.
- *
- * @param requestedAmount memory to free from LRU in bytes
- * @return actual amount of memory freed in bytes
- */
- public synchronized long freeMemory(long requestedAmount) throws Exception {
- if(requestedAmount > (getMemUsed() - getMinimumUsage())) {
- return clearAll();
- }
- long freedMemory = 0;
- while(freedMemory < requestedAmount) {
- freedMemory += evictFromLru();
- }
- return freedMemory;
- }
-
- /**
- * The total memory usage of this map
- *
- * @return memory usage of map in bytes
- */
- public long heapSize() {
- return (memTotal - getMemFree());
- }
-
- //--------------------------------------------------------------------------
- /**
- * Retrieves the value associated with the specified key.
- *
- * If an entry is found, it is updated in the LRU as the most recently
- * used (last to be evicted) entry in the map.
- *
- * @param key the key
- * @return the associated value, or null if none found
- * @throws NullPointerException if key is null
- */
- public synchronized V get(Object key) {
- checkKey((K)key);
- int hash = hash(key);
- int i = hashIndex(hash, entries.length);
- Entry<K,V> e = entries[i];
- while (true) {
- if (e == null) {
- missCount++;
- return null;
- }
- if (e.hash == hash && isEqual(key, e.key)) {
- // Hit! Update position in LRU
- hitCount++;
- updateLru(e);
- return e.value;
- }
- e = e.next;
- }
- }
-
- /**
- * Insert a key-value mapping into the map.
- *
- * Entry will be inserted as the most recently used.
- *
- * Both the key and value are required to be Objects and must
- * implement the HeapSize interface.
- *
- * @param key the key
- * @param value the value
- * @return the value that was previously mapped to this key, null if none
- * @throws UnsupportedOperationException if either object does not
- * implement HeapSize
- * @throws NullPointerException if the key or value is null
- */
- public synchronized V put(K key, V value) {
- checkKey(key);
- checkValue(value);
- int hash = hash(key);
- int i = hashIndex(hash, entries.length);
-
- // For old values
- for (Entry<K,V> e = entries[i]; e != null; e = e.next) {
- if (e.hash == hash && isEqual(key, e.key)) {
- V oldValue = e.value;
- long memChange = e.replaceValue(value);
- checkAndFreeMemory(memChange);
- // If replacing an old value for this key, update in LRU
- updateLru(e);
- return oldValue;
- }
- }
- long memChange = addEntry(hash, key, value, i);
- checkAndFreeMemory(memChange);
- return null;
- }
-
- /**
- * Deletes the mapping for the specified key if it exists.
- *
- * @param key the key of the entry to be removed from the map
- * @return the value associated with the specified key, or null
- * if no mapping exists.
- */
- public synchronized V remove(Object key) {
- Entry<K,V> e = removeEntryForKey((K)key);
- if(e == null) return null;
- // Add freed memory back to available
- memFree += e.heapSize();
- return e.value;
- }
-
- /**
- * Gets the size (number of entries) of the map.
- *
- * @return size of the map
- */
- public int size() {
- return size;
- }
-
- /**
- * Checks whether the map is currently empty.
- *
- * @return true if size of map is zero
- */
- public boolean isEmpty() {
- return size == 0;
- }
-
- /**
- * Clears all entries from the map.
- *
- * This frees all entries, tracking memory usage along the way.
- * All references to entries are removed so they can be GC'd.
- */
- public synchronized void clear() {
- memFree += clearAll();
- }
-
- //--------------------------------------------------------------------------
- /**
- * Checks whether there is a value in the map for the specified key.
- *
- * Does not affect the LRU.
- *
- * @param key the key to check
- * @return true if the map contains a value for this key, false if not
- * @throws NullPointerException if the key is null
- */
- public synchronized boolean containsKey(Object key) {
- checkKey((K)key);
- int hash = hash(key);
- int i = hashIndex(hash, entries.length);
- Entry e = entries[i];
- while (e != null) {
- if (e.hash == hash && isEqual(key, e.key))
- return true;
- e = e.next;
- }
- return false;
- }
-
- /**
- * Checks whether this is a mapping which contains the specified value.
- *
- * Does not affect the LRU. This is an inefficient operation.
- *
- * @param value the value to check
- * @return true if the map contains an entry for this value, false
- * if not
- * @throws NullPointerException if the value is null
- */
- public synchronized boolean containsValue(Object value) {
- checkValue((V)value);
- Entry[] tab = entries;
- for (int i = 0; i < tab.length ; i++)
- for (Entry e = tab[i] ; e != null ; e = e.next)
- if (value.equals(e.value))
- return true;
- return false;
- }
-
- //--------------------------------------------------------------------------
- /**
- * Enforces key constraints. Null keys are not permitted and the key must
- * implement HeapSize. It should not be necessary to verify the second
- * constraint because it is enforced by the generic bounds at instantiation.
- *
- * Can add other constraints in the future.
- *
- * @param key the key
- * @throws NullPointerException if the key is null
- * @throws UnsupportedOperationException if the key class does not
- * implement the HeapSize interface
- */
- private void checkKey(K key) {
- if(key == null) {
- throw new NullPointerException("null keys are not allowed");
- }
- }
-
- /**
- * Enforces value constraints. Null values are not permitted and the value must
- * implement HeapSize. It should not be necessary to verify the second
- * constraint because it is enforced by the generic bounds at instantiation.
- *
- * Can add other constraints in the future.
- *
- * @param value the value
- * @throws NullPointerException if the value is null
- * @throws UnsupportedOperationException if the value class does not
- * implement the HeapSize interface
- */
- private void checkValue(V value) {
- if(value == null) {
- throw new NullPointerException("null values are not allowed");
- }
- }
-
- /**
- * Returns the minimum memory usage of the base map structure.
- *
- * @return baseline memory overhead of object in bytes
- */
- private long getMinimumUsage() {
- return OVERHEAD + (entries.length * ClassSize.REFERENCE);
- }
-
- //--------------------------------------------------------------------------
- /**
- * Evicts and frees based on LRU until at least as much memory as requested
- * is available.
- *
- * @param memNeeded the amount of memory needed in bytes
- */
- private void checkAndFreeMemory(long memNeeded) {
- while(memFree < memNeeded) {
- evictFromLru();
- }
- memFree -= memNeeded;
- }
-
- /**
- * Evicts based on LRU. This removes all references and updates available
- * memory.
- *
- * @return amount of memory freed in bytes
- */
- private long evictFromLru() {
- long freed = headPtr.heapSize();
- memFree += freed;
- removeEntry(headPtr);
- return freed;
- }
-
- /**
- * Moves the specified entry to the most recently used slot of the
- * LRU. This is called whenever an entry is fetched.
- *
- * @param e entry that was accessed
- */
- private void updateLru(Entry<K,V> e) {
- Entry<K,V> prev = e.getPrevPtr();
- Entry<K,V> next = e.getNextPtr();
- if(next != null) {
- if(prev != null) {
- prev.setNextPtr(next);
- next.setPrevPtr(prev);
- } else {
- headPtr = next;
- headPtr.setPrevPtr(null);
- }
- e.setNextPtr(null);
- e.setPrevPtr(tailPtr);
- tailPtr.setNextPtr(e);
- tailPtr = e;
- }
- }
-
- /**
- * Removes the specified entry from the map and LRU structure.
- *
- * @param entry entry to be removed
- */
- private void removeEntry(Entry<K,V> entry) {
- K k = entry.key;
- int hash = entry.hash;
- int i = hashIndex(hash, entries.length);
- Entry<K,V> prev = entries[i];
- Entry<K,V> e = prev;
-
- while (e != null) {
- Entry<K,V> next = e.next;
- if (e.hash == hash && isEqual(k, e.key)) {
- size--;
- if (prev == e) {
- entries[i] = next;
- } else {
- prev.next = next;
- }
-
- Entry<K,V> prevPtr = e.getPrevPtr();
- Entry<K,V> nextPtr = e.getNextPtr();
-
- if(prevPtr != null && nextPtr != null) {
- prevPtr.setNextPtr(nextPtr);
- nextPtr.setPrevPtr(prevPtr);
- } else if(prevPtr != null) {
- tailPtr = prevPtr;
- prevPtr.setNextPtr(null);
- } else if(nextPtr != null) {
- headPtr = nextPtr;
- nextPtr.setPrevPtr(null);
- }
-
- return;
- }
- prev = e;
- e = next;
- }
- }
-
- /**
- * Removes and returns the entry associated with the specified
- * key.
- *
- * @param key key of the entry to be deleted
- * @return entry that was removed, or null if none found
- */
- private Entry<K,V> removeEntryForKey(K key) {
- int hash = hash(key);
- int i = hashIndex(hash, entries.length);
- Entry<K,V> prev = entries[i];
- Entry<K,V> e = prev;
-
- while (e != null) {
- Entry<K,V> next = e.next;
- if (e.hash == hash && isEqual(key, e.key)) {
- size--;
- if (prev == e) {
- entries[i] = next;
- } else {
- prev.next = next;
- }
-
- // Updating LRU
- Entry<K,V> prevPtr = e.getPrevPtr();
- Entry<K,V> nextPtr = e.getNextPtr();
- if(prevPtr != null && nextPtr != null) {
- prevPtr.setNextPtr(nextPtr);
- nextPtr.setPrevPtr(prevPtr);
- } else if(prevPtr != null) {
- tailPtr = prevPtr;
- prevPtr.setNextPtr(null);
- } else if(nextPtr != null) {
- headPtr = nextPtr;
- nextPtr.setPrevPtr(null);
- }
-
- return e;
- }
- prev = e;
- e = next;
- }
-
- return e;
- }
-
- /**
- * Adds a new entry with the specified key, value, hash code, and
- * bucket index to the map.
- *
- * Also puts it in the bottom (most-recent) slot of the list and
- * checks to see if we need to grow the array.
- *
- * @param hash hash value of key
- * @param key the key
- * @param value the value
- * @param bucketIndex index into hash array to store this entry
- * @return the amount of heap size used to store the new entry
- */
- private long addEntry(int hash, K key, V value, int bucketIndex) {
- Entry<K,V> e = entries[bucketIndex];
- Entry<K,V> newE = new Entry<>(hash, key, value, e, tailPtr);
- entries[bucketIndex] = newE;
- // add as most recently used in lru
- if (size == 0) {
- headPtr = newE;
- tailPtr = newE;
- } else {
- newE.setPrevPtr(tailPtr);
- tailPtr.setNextPtr(newE);
- tailPtr = newE;
- }
- // Grow table if we are past the threshold now
- if (size++ >= threshold) {
- growTable(2 * entries.length);
- }
- return newE.heapSize();
- }
-
- /**
- * Clears all the entries in the map. Tracks the amount of memory being
- * freed along the way and returns the total.
- *
- * Cleans up all references to allow old entries to be GC'd.
- *
- * @return total memory freed in bytes
- */
- private long clearAll() {
- Entry cur;
- long freedMemory = 0;
- for(int i=0; i<entries.length; i++) {
- cur = entries[i];
- while(cur != null) {
- freedMemory += cur.heapSize();
- cur = cur.next;
- }
- entries[i] = null;
- }
- headPtr = null;
- tailPtr = null;
- size = 0;
- return freedMemory;
- }
-
- //--------------------------------------------------------------------------
- /**
- * Recreates the entire contents of the hashmap into a new array
- * with double the capacity. This method is called when the number of
- * keys in the map reaches the current threshold.
- *
- * @param newCapacity the new size of the hash entries
- */
- private void growTable(int newCapacity) {
- Entry [] oldTable = entries;
- int oldCapacity = oldTable.length;
-
- // Do not allow growing the table beyond the max capacity
- if (oldCapacity == MAXIMUM_CAPACITY) {
- threshold = Integer.MAX_VALUE;
- return;
- }
-
- // Determine how much additional space will be required to grow the array
- long requiredSpace = (newCapacity - oldCapacity) * ClassSize.REFERENCE;
-
- // Verify/enforce we have sufficient memory to grow
- checkAndFreeMemory(requiredSpace);
-
- Entry [] newTable = new Entry[newCapacity];
-
- // Transfer existing entries to new hash table
- for(int i=0; i < oldCapacity; i++) {
- Entry<K,V> entry = oldTable[i];
- if(entry != null) {
- // Set to null for GC
- oldTable[i] = null;
- do {
- Entry<K,V> next = entry.next;
- int idx = hashIndex(entry.hash, newCapacity);
- entry.next = newTable[idx];
- newTable[idx] = entry;
- entry = next;
- } while(entry != null);
- }
- }
-
- entries = newTable;
- threshold = (int)(newCapacity * loadFactor);
- }
-
- /**
- * Gets the hash code for the specified key.
- * This implementation uses the additional hashing routine
- * from JDK 1.4.
- *
- * @param key the key to get a hash value for
- * @return the hash value
- */
- private int hash(Object key) {
- int h = key.hashCode();
- h += ~(h << 9);
- h ^= (h >>> 14);
- h += (h << 4);
- h ^= (h >>> 10);
- return h;
- }
-
- /**
- * Compares two objects for equality. Method uses equals method and
- * assumes neither value is null.
- *
- * @param x the first value
- * @param y the second value
- * @return true if equal
- */
- private boolean isEqual(Object x, Object y) {
- return (x == y || x.equals(y));
- }
-
- /**
- * Determines the index into the current hash table for the specified
- * hashValue.
- *
- * @param hashValue the hash value
- * @param length the current number of hash buckets
- * @return the index of the current hash array to use
- */
- private int hashIndex(int hashValue, int length) {
- return hashValue & (length - 1);
- }
-
- /**
- * Calculates the capacity of the array backing the hash
- * by normalizing capacity to a power of 2 and enforcing
- * capacity limits.
- *
- * @param proposedCapacity the proposed capacity
- * @return the normalized capacity
- */
- private int calculateCapacity(int proposedCapacity) {
- int newCapacity = 1;
- if(proposedCapacity > MAXIMUM_CAPACITY) {
- newCapacity = MAXIMUM_CAPACITY;
- } else {
- while(newCapacity < proposedCapacity) {
- newCapacity <<= 1;
- }
- if(newCapacity > MAXIMUM_CAPACITY) {
- newCapacity = MAXIMUM_CAPACITY;
- }
- }
- return newCapacity;
- }
-
- /**
- * Calculates the threshold of the map given the capacity and load
- * factor. Once the number of entries in the map grows to the
- * threshold we will double the size of the array.
- *
- * @param capacity the size of the array
- * @param factor the load factor of the hash
- */
- private int calculateThreshold(int capacity, float factor) {
- return (int)(capacity * factor);
- }
-
- /**
- * Set the initial heap usage of this class. Includes class variable
- * overhead and the entry array.
- */
- private void init() {
- memFree -= OVERHEAD;
- memFree -= (entries.length * ClassSize.REFERENCE);
- }
-
- //--------------------------------------------------------------------------
- /**
- * Debugging function that returns a List sorted by access time.
- *
- * The order is oldest to newest (first in list is next to be evicted).
- *
- * @return Sorted list of entries
- */
- public List<Entry<K,V>> entryLruList() {
- List<Entry<K,V>> entryList = new ArrayList<>();
- Entry<K,V> entry = headPtr;
- while(entry != null) {
- entryList.add(entry);
- entry = entry.getNextPtr();
- }
- return entryList;
- }
-
- /**
- * Debugging function that returns a Set of all entries in the hash table.
- *
- * @return Set of entries in hash
- */
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
- justification="Unused debugging function that reads only")
- public Set<Entry<K,V>> entryTableSet() {
- Set<Entry<K,V>> entrySet = new HashSet<>();
- Entry [] table = entries; // FindBugs IS2_INCONSISTENT_SYNC
- for(int i=0;i<table.length;i++) {
- for(Entry e = table[i]; e != null; e = e.next) {
- entrySet.add(e);
- }
- }
- return entrySet;
- }
-
- /**
- * Get the head of the linked list (least recently used).
- *
- * @return head of linked list
- */
- public Entry getHeadPtr() {
- return headPtr;
- }
-
- /**
- * Get the tail of the linked list (most recently used).
- *
- * @return tail of linked list
- */
- public Entry getTailPtr() {
- return tailPtr;
- }
-
- //--------------------------------------------------------------------------
- /**
- * To best optimize this class, some of the methods that are part of a
- * Map implementation are not supported. This is primarily related
- * to being able to get Sets and Iterators of this map which require
- * significant overhead and code complexity to support and are
- * unnecessary for the requirements of this class.
- */
-
- /**
- * Intentionally unimplemented.
- */
- public Set<Map.Entry<K,V>> entrySet() {
- throw new UnsupportedOperationException(
- "entrySet() is intentionally unimplemented");
- }
-
- /**
- * Intentionally unimplemented.
- */
- public boolean equals(Object o) {
- throw new UnsupportedOperationException(
- "equals(Object) is intentionally unimplemented");
- }
-
- /**
- * Intentionally unimplemented.
- */
- public int hashCode() {
- throw new UnsupportedOperationException(
- "hashCode(Object) is intentionally unimplemented");
- }
-
- /**
- * Intentionally unimplemented.
- */
- public Set<K> keySet() {
- throw new UnsupportedOperationException(
- "keySet() is intentionally unimplemented");
- }
-
- /**
- * Intentionally unimplemented.
- */
- public void putAll(Map<? extends K, ? extends V> m) {
- throw new UnsupportedOperationException(
- "putAll() is intentionally unimplemented");
- }
-
- /**
- * Intentionally unimplemented.
- */
- public Collection<V> values() {
- throw new UnsupportedOperationException(
- "values() is intentionally unimplemented");
- }
-
- //--------------------------------------------------------------------------
- /**
- * Entry to store key/value mappings.
- * <p>
- * Contains previous and next pointers for the doubly linked-list which is
- * used for LRU eviction.
- * <p>
- * Instantiations of this class are memory aware. Both the key and value
- * classes used must also implement <code>HeapSize</code>.
- */
- protected static class Entry<K extends HeapSize, V extends HeapSize>
- implements Map.Entry<K,V>, HeapSize {
- /** The baseline overhead memory usage of this class */
- static final int OVERHEAD = 1 * Bytes.SIZEOF_LONG +
- 5 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT;
-
- /** The key */
- protected final K key;
- /** The value */
- protected V value;
- /** The hash value for this entries key */
- protected final int hash;
- /** The next entry in the hash chain (for collisions) */
- protected Entry<K,V> next;
-
- /** The previous entry in the LRU list (towards LRU) */
- protected Entry<K,V> prevPtr;
- /** The next entry in the LRU list (towards MRU) */
- protected Entry<K,V> nextPtr;
-
- /** The precomputed heap size of this entry */
- protected long heapSize;
-
- /**
- * Create a new entry.
- *
- * @param h the hash value of the key
- * @param k the key
- * @param v the value
- * @param nextChainPtr the next entry in the hash chain, null if none
- * @param prevLruPtr the previous entry in the LRU
- */
- Entry(int h, K k, V v, Entry<K,V> nextChainPtr, Entry<K,V> prevLruPtr) {
- value = v;
- next = nextChainPtr;
- key = k;
- hash = h;
- prevPtr = prevLruPtr;
- nextPtr = null;
- // Pre-compute heap size
- heapSize = OVERHEAD + k.heapSize() + v.heapSize();
- }
-
- /**
- * Get the key of this entry.
- *
- * @return the key associated with this entry
- */
- public K getKey() {
- return key;
- }
-
- /**
- * Get the value of this entry.
- *
- * @return the value currently associated with this entry
- */
- public V getValue() {
- return value;
- }
-
- /**
- * Set the value of this entry.
- *
- * It is not recommended to use this method when changing the value.
- * Rather, using <code>replaceValue</code> will return the difference
- * in heap usage between the previous and current values.
- *
- * @param newValue the new value to associate with this entry
- * @return the value previously associated with this entry
- */
- public V setValue(V newValue) {
- V oldValue = value;
- value = newValue;
- return oldValue;
- }
-
- /**
- * Replace the value of this entry.
- *
- * Computes and returns the difference in heap size when changing
- * the value associated with this entry.
- *
- * @param newValue the new value to associate with this entry
- * @return the change in heap usage of this entry in bytes
- */
- protected long replaceValue(V newValue) {
- long sizeDiff = newValue.heapSize() - value.heapSize();
- value = newValue;
- heapSize += sizeDiff;
- return sizeDiff;
- }
-
- /**
- * Returns true if the specified entry has the same key and the
- * same value as this entry.
- *
- * @param o entry to test against current
- * @return true if entries have equal key and value, false if not
- */
- public boolean equals(Object o) {
- if (!(o instanceof Map.Entry))
- return false;
- Map.Entry e = (Map.Entry)o;
- Object k1 = getKey();
- Object k2 = e.getKey();
- if (k1 == k2 || (k1 != null && k1.equals(k2))) {
- Object v1 = getValue();
- Object v2 = e.getValue();
- if (v1 == v2 || (v1 != null && v1.equals(v2)))
- return true;
- }
- return false;
- }
-
- /**
- * Returns the hash code of the entry by xor'ing the hash values
- * of the key and value of this entry.
- *
- * @return hash value of this entry
- */
- public int hashCode() {
- return (key.hashCode() ^ value.hashCode());
- }
-
- /**
- * Returns String representation of the entry in form "key=value"
- *
- * @return string value of entry
- */
- public String toString() {
- return getKey() + "=" + getValue();
- }
-
- //------------------------------------------------------------------------
- /**
- * Sets the previous pointer for the entry in the LRU.
- * @param prevPtr previous entry
- */
- protected void setPrevPtr(Entry<K,V> prevPtr){
- this.prevPtr = prevPtr;
- }
-
- /**
- * Returns the previous pointer for the entry in the LRU.
- * @return previous entry
- */
- protected Entry<K,V> getPrevPtr(){
- return prevPtr;
- }
-
- /**
- * Sets the next pointer for the entry in the LRU.
- * @param nextPtr next entry
- */
- protected void setNextPtr(Entry<K,V> nextPtr){
- this.nextPtr = nextPtr;
- }
-
- /**
- * Returns the next pointer for the entry in the LRU.
- * @return next entry
- */
- protected Entry<K,V> getNextPtr(){
- return nextPtr;
- }
-
- /**
- * Returns the pre-computed and "deep" size of the Entry
- * @return size of the entry in bytes
- */
- public long heapSize() {
- return heapSize;
- }
- }
-}
-
-
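Since both keys and values of the removed map must implement HeapSize, using it required small wrappers around plain objects. A hedged sketch against the class as deleted above (the wrapper and its size estimate are illustrative, not HBase code):

import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.regionserver.LruHashMap;

public class LruHashMapExample {
  // Minimal HeapSize wrapper for demonstration; the size is a rough estimate.
  static class SizedString implements HeapSize {
    final String s;
    SizedString(String s) { this.s = s; }
    @Override public long heapSize() { return 16 + 2L * s.length(); }
    @Override public boolean equals(Object o) {
      return o instanceof SizedString && ((SizedString) o).s.equals(s);
    }
    @Override public int hashCode() { return s.hashCode(); }
  }

  public static void main(String[] args) {
    // Cap tracked heap at ~1 MB (long selects the max-memory constructor);
    // least recently used entries are evicted once the budget is exhausted.
    LruHashMap<SizedString, SizedString> map = new LruHashMap<>(1024L * 1024);
    map.put(new SizedString("k"), new SizedString("v"));
    System.out.println(map.get(new SizedString("k")).s);
  }
}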
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
deleted file mode 100644
index 4a3f52f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.security.User;
-
-/**
- * Executes region merge as a "transaction". It is similar with
- * SplitTransaction. Call {@link #prepare(RegionServerServices)} to setup the
- * transaction, {@link #execute(Server, RegionServerServices)} to run the
- * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if
- * execute fails.
- *
- * <p>Here is an example of how you would use this interface:
- * <pre>
- * RegionMergeTransactionFactory factory = new RegionMergeTransactionFactory(conf);
- * RegionMergeTransaction mt = factory.create(parent, midKey)
- * .registerTransactionListener(new TransactionListener() {
- * public void transition(RegionMergeTransaction transaction,
- * RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) throws IOException {
- * // ...
- * }
- * public void rollback(RegionMergeTransaction transaction,
- * RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
- * // ...
- * }
- * });
- * if (!mt.prepare()) return;
- * try {
- * mt.execute(server, services);
- * } catch (IOException ioe) {
- * try {
- * mt.rollback(server, services);
- * return;
- * } catch (RuntimeException e) {
- * // abort the server
- * }
- * }
- * </pre>
- * <p>A merge transaction is not thread safe. Callers must ensure a merge is run by
- * one thread only.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public interface RegionMergeTransaction {
- /**
- * Each enum is a step in the merge transaction.
- */
- enum RegionMergeTransactionPhase {
- STARTED,
- /**
- * Prepared
- */
- PREPARED,
- /**
- * Set region as in transition, set it into MERGING state.
- */
- SET_MERGING,
- /**
- * We created the temporary merge data directory.
- */
- CREATED_MERGE_DIR,
- /**
- * Closed the merging region A.
- */
- CLOSED_REGION_A,
- /**
- * The merging region A has been taken out of the server's online regions list.
- */
- OFFLINED_REGION_A,
- /**
- * Closed the merging region B.
- */
- CLOSED_REGION_B,
- /**
- * The merging region B has been taken out of the server's online regions list.
- */
- OFFLINED_REGION_B,
- /**
- * Started in on creation of the merged region.
- */
- STARTED_MERGED_REGION_CREATION,
- /**
- * Point of no return. If we got here, then transaction is not recoverable
- * other than by crashing out the regionserver.
- */
- PONR,
- /**
- * Completed
- */
- COMPLETED
- }
-
- /**
- * Merge transaction journal entry
- */
- public interface JournalEntry {
-
- /** @return the completed phase marked by this journal entry */
- RegionMergeTransactionPhase getPhase();
-
- /** @return the time of phase completion */
- long getTimeStamp();
- }
-
- /**
- * Merge transaction listener
- */
- public interface TransactionListener {
-
- /**
- * Invoked when transitioning forward from one transaction phase to another
- * @param transaction the transaction
- * @param from the current phase
- * @param to the next phase
- * @throws IOException listener can throw this to abort
- */
- void transition(RegionMergeTransaction transaction, RegionMergeTransactionPhase from,
- RegionMergeTransactionPhase to) throws IOException;
-
- /**
- * Invoked when rolling back a transaction from one transaction phase to the
- * previous
- * @param transaction the transaction
- * @param from the current phase
- * @param to the previous phase
- */
- void rollback(RegionMergeTransaction transaction, RegionMergeTransactionPhase from,
- RegionMergeTransactionPhase to);
- }
-
- /**
- * Check merge inputs and prepare the transaction.
- * @param services
- * @return <code>true</code> if the regions are mergeable,
- * <code>false</code> if they are not (e.g. a region is already closed).
- * @throws IOException
- */
- boolean prepare(RegionServerServices services) throws IOException;
-
- /**
- * Run the transaction.
- * @param server Hosting server instance. Can be null when testing
- * @param services Used to online/offline regions.
- * @throws IOException If thrown, transaction failed. Call
- * {@link #rollback(Server, RegionServerServices)}
- * @return merged region
- * @see #rollback(Server, RegionServerServices)
- * @deprecated use #execute(Server, RegionServerServices, User)
- */
- @Deprecated
- Region execute(Server server, RegionServerServices services) throws IOException;
-
- /**
- * Run the transaction.
- * @param server Hosting server instance. Can be null when testing
- * @param services Used to online/offline regions.
- * @param user
- * @throws IOException If thrown, transaction failed. Call
- * {@link #rollback(Server, RegionServerServices)}
- * @return merged region
- * @see #rollback(Server, RegionServerServices, User)
- */
- Region execute(Server server, RegionServerServices services, User user) throws IOException;
-
- /**
- * Roll back a failed transaction
- * @param server Hosting server instance (May be null when testing).
- * @param services Services of regionserver, used to online regions.
- * @throws IOException If thrown, rollback failed. Take drastic action.
- * @return True if we successfully rolled back, false if we got to the point
- * of no return and so now need to abort the server to minimize
- * damage.
- * @deprecated use #rollback(Server, RegionServerServices, User)
- */
- @Deprecated
- boolean rollback(Server server, RegionServerServices services) throws IOException;
-
- /**
- * Roll back a failed transaction
- * @param server Hosting server instance (May be null when testing).
- * @param services Services of regionserver, used to online regions.
- * @param user
- * @throws IOException If thrown, rollback failed. Take drastic action.
- * @return True if we successfully rolled back, false if we got to the point
- * of no return and so now need to abort the server to minimize
- * damage.
- */
- boolean rollback(Server server, RegionServerServices services, User user) throws IOException;
-
- /**
- * Register a listener for transaction preparation, execution, and possibly
- * rollback phases.
- * <p>A listener can abort a transaction by throwing an exception.
- * @param listener the listener
- * @return 'this' for chaining
- */
- RegionMergeTransaction registerTransactionListener(TransactionListener listener);
-
- /** @return merged region info */
- HRegionInfo getMergedRegionInfo();
-
- /**
- * Get the journal for the transaction.
- * <p>Journal entries are an opaque type represented as JournalEntry. They can
- * also provide useful debugging information via their toString method.
- * @return the transaction journal
- */
- List<JournalEntry> getJournal();
-
- /**
- * Get the Server running the transaction or rollback
- * @return server instance
- */
- Server getServer();
-
- /**
- * Get the RegionServerServices of the server running the transaction or rollback
- * @return region server services
- */
- RegionServerServices getRegionServerServices();
-}
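
For context on the interface removed above, a minimal sketch of how a caller
would drive the prepare/execute/rollback lifecycle. This is illustrative only:
'factory' (a RegionMergeTransactionFactory), 'server', 'services' and 'user'
are assumed from the surrounding regionserver context, and an import of
RegionMergeTransaction.RegionMergeTransactionPhase is assumed.

    RegionMergeTransaction mt = factory.create(regionA, regionB, false)
        .registerTransactionListener(new RegionMergeTransaction.TransactionListener() {
          @Override
          public void transition(RegionMergeTransaction t,
              RegionMergeTransactionPhase from, RegionMergeTransactionPhase to)
              throws IOException {
            // Throwing here vetoes the forward transition and aborts the merge.
          }
          @Override
          public void rollback(RegionMergeTransaction t,
              RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
            // Rollback transitions can be observed but not vetoed.
          }
        });
    if (!mt.prepare(services)) {
      return; // the regions were not mergeable
    }
    try {
      mt.execute(server, services, user);
    } catch (IOException ioe) {
      if (!mt.rollback(server, services, user)) {
        // Past the point of no return (PONR): abort to minimize damage.
        server.abort("merge failed past the point of no return", ioe);
      }
    }
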
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
deleted file mode 100644
index 7c89f11..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALFactory;
-
-/**
- * Contains utility methods for manipulating HBase meta tables.
- * Be sure to call {@link #shutdown()} when done with this class so it closes
- * resources opened during meta processing (ROOT, META, etc.). Use this
- * class with care; if used during migrations, first verify whether
- * migration is actually needed.
- */
-@InterfaceAudience.Private
-public class MetaUtils {
- private static final Log LOG = LogFactory.getLog(MetaUtils.class);
- private final Configuration conf;
- private final FSTableDescriptors descriptors;
- private FileSystem fs;
- private WALFactory walFactory;
- private HRegion metaRegion;
- private Map<byte [], HRegion> metaRegions = Collections.synchronizedSortedMap(
- new TreeMap<byte [], HRegion>(Bytes.BYTES_COMPARATOR));
-
- /** Default constructor
- * @throws IOException e
- */
- public MetaUtils() throws IOException {
- this(HBaseConfiguration.create());
- }
-
- /**
- * @param conf Configuration
- * @throws IOException e
- */
- public MetaUtils(Configuration conf) throws IOException {
- this.conf = conf;
- conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
- this.metaRegion = null;
- this.descriptors = new FSTableDescriptors(conf);
- initialize();
- }
-
- /**
- * Verifies that DFS is available and that HBase is off-line.
- * @throws IOException e
- */
- private void initialize() throws IOException {
- this.fs = FileSystem.get(this.conf);
- }
-
- /**
- * @return the WAL associated with the given region
- * @throws IOException e
- */
- public synchronized WAL getLog(HRegionInfo info) throws IOException {
- if (this.walFactory == null) {
- String logName =
- HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis();
- final Configuration walConf = new Configuration(this.conf);
- FSUtils.setRootDir(walConf, fs.getHomeDirectory());
- this.walFactory = new WALFactory(walConf, null, logName);
- }
- final byte[] region = info.getEncodedNameAsBytes();
- final byte[] namespace = info.getTable().getNamespace();
- return info.isMetaRegion() ? walFactory.getMetaWAL(region) : walFactory.getWAL(region,
- namespace);
- }
-
- /**
- * @return HRegion for meta region
- * @throws IOException e
- */
- public synchronized HRegion getMetaRegion() throws IOException {
- return this.metaRegion == null? openMetaRegion(): this.metaRegion;
- }
-
- /**
- * Closes catalog regions if open. Also closes and deletes the WAL. You
- * must call this method if you want to persist changes made during a
- * MetaUtils edit session.
- */
- public synchronized void shutdown() {
- if (this.metaRegion != null) {
- try {
- this.metaRegion.close();
- } catch (IOException e) {
- LOG.error("closing meta region", e);
- } finally {
- this.metaRegion = null;
- }
- }
- try {
- for (HRegion r: metaRegions.values()) {
- LOG.info("CLOSING hbase:meta " + r.toString());
- r.close();
- }
- } catch (IOException e) {
- LOG.error("closing meta region", e);
- } finally {
- metaRegions.clear();
- }
- try {
- if (this.walFactory != null) {
- this.walFactory.close();
- }
- } catch (IOException e) {
- LOG.error("closing WAL", e);
- }
- }
-
- private synchronized HRegion openMetaRegion() throws IOException {
- if (this.metaRegion != null) {
- return this.metaRegion;
- }
- this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
- descriptors.get(TableName.META_TABLE_NAME), getLog(HRegionInfo.FIRST_META_REGIONINFO),
- this.conf);
- this.metaRegion.compactStores();
- return this.metaRegion;
- }
-}
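
For reference, the removed class was driven as an open/edit/shutdown session.
A minimal sketch, assuming a Configuration 'conf'; the edit itself is a
placeholder:

    MetaUtils utils = new MetaUtils(conf);   // forces client retries to 1
    try {
      HRegion meta = utils.getMetaRegion();  // lazily opens hbase:meta
      // ... offline edits against 'meta' would go here ...
    } finally {
      utils.shutdown();  // closes the region and WAL so edits persist
    }
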
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
deleted file mode 100644
index 05e0f49..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Simple {@link java.util.SortedSet} implementation that uses an internal
- * {@link java.util.TreeSet} to provide ordering. All mutation operations
- * create a new copy of the <code>TreeSet</code> instance, so are very
- * expensive. This class is only intended for use on small, very rarely
- * written collections that expect highly concurrent reads. Read operations
- * are performed on a reference to the internal <code>TreeSet</code> at the
- * time of invocation, so will not see any mutations to the collection during
- * their operation.
- *
- * <p>Note that due to the use of a {@link java.util.TreeSet} internally,
- * a {@link java.util.Comparator} instance must be provided, or collection
- * elements must implement {@link java.lang.Comparable}.
- * </p>
- * @param <E> A class implementing {@link java.lang.Comparable} or able to be
- * compared by a provided comparator.
- */
-@InterfaceAudience.Private
-public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
- private volatile SortedSet<E> internalSet;
-
- public SortedCopyOnWriteSet() {
- this.internalSet = new TreeSet<>();
- }
-
- public SortedCopyOnWriteSet(Collection<? extends E> c) {
- this.internalSet = new TreeSet<>(c);
- }
-
- public SortedCopyOnWriteSet(Comparator<? super E> comparator) {
- this.internalSet = new TreeSet<>(comparator);
- }
-
- @Override
- public int size() {
- return internalSet.size();
- }
-
- @Override
- public boolean isEmpty() {
- return internalSet.isEmpty();
- }
-
- @Override
- public boolean contains(Object o) {
- return internalSet.contains(o);
- }
-
- @Override
- public Iterator<E> iterator() {
- return internalSet.iterator();
- }
-
- @Override
- public Object[] toArray() {
- return internalSet.toArray();
- }
-
- @Override
- public <T> T[] toArray(T[] a) {
- return internalSet.toArray(a);
- }
-
- @Override
- public synchronized boolean add(E e) {
- SortedSet<E> newSet = new TreeSet<>(internalSet);
- boolean added = newSet.add(e);
- internalSet = newSet;
- return added;
- }
-
- @Override
- public synchronized boolean remove(Object o) {
- SortedSet<E> newSet = new TreeSet<>(internalSet);
- boolean removed = newSet.remove(o);
- internalSet = newSet;
- return removed;
- }
-
- @Override
- public boolean containsAll(Collection<?> c) {
- return internalSet.containsAll(c);
- }
-
- @Override
- public synchronized boolean addAll(Collection<? extends E> c) {
- SortedSet<E> newSet = new TreeSet<>(internalSet);
- boolean changed = newSet.addAll(c);
- internalSet = newSet;
- return changed;
- }
-
- @Override
- public synchronized boolean retainAll(Collection<?> c) {
- SortedSet<E> newSet = new TreeSet<>(internalSet);
- boolean changed = newSet.retainAll(c);
- internalSet = newSet;
- return changed;
- }
-
- @Override
- public synchronized boolean removeAll(Collection<?> c) {
- SortedSet<E> newSet = new TreeSet<>(internalSet);
- boolean changed = newSet.removeAll(c);
- internalSet = newSet;
- return changed;
- }
-
- @Override
- public synchronized void clear() {
- Comparator<? super E> comparator = internalSet.comparator();
- if (comparator != null) {
- internalSet = new TreeSet<>(comparator);
- } else {
- internalSet = new TreeSet<>();
- }
- }
-
- @Override
- public Comparator<? super E> comparator() {
- return internalSet.comparator();
- }
-
- @Override
- public SortedSet<E> subSet(E fromElement, E toElement) {
- return internalSet.subSet(fromElement, toElement);
- }
-
- @Override
- public SortedSet<E> headSet(E toElement) {
- return internalSet.headSet(toElement);
- }
-
- @Override
- public SortedSet<E> tailSet(E fromElement) {
- return internalSet.tailSet(fromElement);
- }
-
- @Override
- public E first() {
- return internalSet.first();
- }
-
- @Override
- public E last() {
- return internalSet.last();
- }
-}
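
The contract documented above is easiest to see in a small sketch: every
mutation copies the backing TreeSet, while an iterator keeps reading the
snapshot it started from.

    SortedCopyOnWriteSet<String> set = new SortedCopyOnWriteSet<>();
    set.add("alpha");                      // each mutation copies the TreeSet
    set.add("beta");
    Iterator<String> it = set.iterator();  // pins the current snapshot
    set.remove("alpha");                   // swaps in a new TreeSet
    while (it.hasNext()) {
      System.out.println(it.next());       // still prints "alpha" then "beta"
    }
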
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 2f33859..6b943a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -65,7 +65,7 @@ public class TestHeapSize {
private static final Log LOG = LogFactory.getLog(TestHeapSize.class);
// List of classes implementing HeapSize
// BatchOperation, BatchUpdate, BlockIndex, Entry, Entry<K,V>, HStoreKey
- // KeyValue, LruBlockCache, LruHashMap<K,V>, Put, WALKey
+ // KeyValue, LruBlockCache, Put, WALKey
@BeforeClass
public static void beforeClass() throws Exception {
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 4c8728f..b78bfd1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -82,7 +82,6 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
-import org.apache.hadoop.hbase.exceptions.OperationConflictException;
import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
@@ -396,7 +395,7 @@ public class TestDistributedLogSplitting {
try {
ht.increment(incr);
fail("should have thrown");
- } catch (OperationConflictException ope) {
+ } catch (IOException ope) {
LOG.debug("Caught as expected: " + ope.getMessage());
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
index 1505fc1..f41a5cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.exceptions.OperationConflictException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
import org.apache.hadoop.util.StringUtils;
@@ -297,7 +296,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
}
totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
} catch (IOException e) {
- if (ignoreNonceConflicts && (e instanceof OperationConflictException)) {
+ if (ignoreNonceConflicts) {
LOG.info("Detected nonce conflict, ignoring: " + e.getMessage());
totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
return;
[23/23] hbase git commit: HBASE-16755 Honor flush policy under global
memstore pressure
Posted by sy...@apache.org.
HBASE-16755 Honor flush policy under global memstore pressure
Signed-off-by: Gary Helmling <ga...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4b62a52e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4b62a52e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4b62a52e
Branch: refs/heads/hbase-12439
Commit: 4b62a52ebcf401d872e6872cf25bdb4556758983
Parents: 85fda44
Author: Ashu Pachauri <as...@gmail.com>
Authored: Mon Nov 28 17:49:48 2016 -0800
Committer: Gary Helmling <ga...@apache.org>
Committed: Mon Mar 27 11:47:20 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/4b62a52e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 7fddb36..aaa9572 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -203,7 +203,7 @@ class MemStoreFlusher implements FlushRequester {
+ humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreDataSize())
+ ", Region memstore size="
+ humanReadableInt(regionToFlush.getMemstoreSize()));
- flushedOne = flushRegion(regionToFlush, true, true);
+ flushedOne = flushRegion(regionToFlush, true, false);
if (!flushedOne) {
LOG.info("Excluding unflushable region " + regionToFlush +
[02/23] hbase git commit: HBASE-17582 Fix broken drop page cache hint
(broken by HBASE-15236).
Posted by sy...@apache.org.
HBASE-17582 Fix broken drop page cache hint (broken by HBASE-15236).
Change-Id: I2947ab979979f977db7b0c282c4aaf4eb1f26482
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e39e0e63
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e39e0e63
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e39e0e63
Branch: refs/heads/hbase-12439
Commit: e39e0e634a2252a352ad799bc2957c72e8d2d2e9
Parents: 55d6dca
Author: Apekshit Sharma <ap...@apache.org>
Authored: Wed Feb 1 23:23:46 2017 -0800
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Mon Mar 20 11:19:51 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/regionserver/StoreFileScanner.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e39e0e63/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index ca7dfd4..ab6b0ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -129,7 +129,7 @@ public class StoreFileScanner implements KeyValueScanner {
List<StoreFile> sorted_files = new ArrayList<>(files);
Collections.sort(sorted_files, StoreFile.Comparators.SEQ_ID);
for (int i = 0; i < sorted_files.size(); i++) {
- StoreFileReader r = sorted_files.get(i).createReader();
+ StoreFileReader r = sorted_files.get(i).createReader(canUseDrop);
r.setReplicaStoreFile(isPrimaryReplica);
StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt,
i, matcher != null ? !matcher.hasNullColumnInQuery() : false);
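
The one-line fix threads the caller's drop-behind hint into reader creation;
the no-arg overload discarded it, so compaction reads churned the OS page
cache. A hedged sketch of where the flag plausibly comes from ('isCompaction'
and 'cacheConf' are illustrative names, not taken from this hunk):

    // Drop-behind is only wanted for compaction-style streaming reads.
    boolean canUseDrop = isCompaction && cacheConf.shouldDropBehindCompaction();
    // Pass the hint through instead of the no-arg createReader(), which lost it.
    StoreFileReader r = storeFile.createReader(canUseDrop);
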
[11/23] hbase git commit: HBASE-17807 correct the value of
zookeeper.session.timeout in hbase doc
Posted by sy...@apache.org.
HBASE-17807 correct the value of zookeeper.session.timeout in hbase doc
Signed-off-by: tedyu <yu...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/94107093
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/94107093
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/94107093
Branch: refs/heads/hbase-12439
Commit: 941070939f4bf65536ee74b9f62d3b5114da826b
Parents: 11dc5bf
Author: chenyechao <ch...@gmail.com>
Authored: Mon Mar 20 14:28:42 2017 +0800
Committer: tedyu <yu...@gmail.com>
Committed: Tue Mar 21 18:53:09 2017 -0700
----------------------------------------------------------------------
src/main/asciidoc/_chapters/troubleshooting.adoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/94107093/src/main/asciidoc/_chapters/troubleshooting.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc b/src/main/asciidoc/_chapters/troubleshooting.adoc
index e1d1717..1cf93d6 100644
--- a/src/main/asciidoc/_chapters/troubleshooting.adoc
+++ b/src/main/asciidoc/_chapters/troubleshooting.adoc
@@ -1050,7 +1050,7 @@ If you wish to increase the session timeout, add the following to your _hbase-si
----
<property>
<name>zookeeper.session.timeout</name>
- <value>1200000</value>
+ <value>120000</value>
</property>
<property>
<name>hbase.zookeeper.property.tickTime</name>
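
The corrected value matters because ZooKeeper clamps a client's requested
session timeout to the range [2 * tickTime, 20 * tickTime], so the two
settings must be tuned together. A hedged hbase-site.xml fragment completing
the snippet above (the tickTime value is illustrative):

    <property>
      <name>zookeeper.session.timeout</name>
      <value>120000</value>  <!-- 120000 ms = 2 minutes -->
    </property>
    <property>
      <name>hbase.zookeeper.property.tickTime</name>
      <value>6000</value>    <!-- 20 * 6000 ms caps the session at 120000 ms -->
    </property>
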
[09/23] hbase git commit: HBASE-17798 RpcServer.Listener.Reader can
abort due to CancelledKeyException (Guangxu Cheng)
Posted by sy...@apache.org.
HBASE-17798 RpcServer.Listener.Reader can abort due to CancelledKeyException (Guangxu Cheng)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1cfd22bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1cfd22bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1cfd22bf
Branch: refs/heads/hbase-12439
Commit: 1cfd22bf43c9b64afae35d9bf16f764d0da80cab
Parents: 8f4ae0a
Author: tedyu <yu...@gmail.com>
Authored: Tue Mar 21 06:59:29 2017 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue Mar 21 06:59:29 2017 -0700
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/1cfd22bf/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 9e1e81e..5f90d50 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -302,7 +302,8 @@ public class SimpleRpcServer extends RpcServer {
if (running) { // unexpected -- log it
LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
}
- return;
+ } catch (CancelledKeyException e) {
+ LOG.error(getName() + ": CancelledKeyException in Reader", e);
} catch (IOException ex) {
LOG.info(getName() + ": IOException in Reader", ex);
}
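
The underlying hazard is generic to java.nio: a SelectionKey can be cancelled
between select() and its use, and the resulting unchecked
CancelledKeyException previously escaped the loop and killed the Reader
thread. A hedged, generic sketch of the surviving loop shape (illustrative
names, not the actual RpcServer code):

    while (running) {
      try {
        selector.select();
        Iterator<SelectionKey> it = selector.selectedKeys().iterator();
        while (it.hasNext()) {
          SelectionKey key = it.next();
          it.remove();
          if (key.isValid() && key.isReadable()) {
            doRead(key);  // may hit a concurrently cancelled key
          }
        }
      } catch (CancelledKeyException e) {
        // Log and continue: one dead connection must not kill the reader.
      } catch (IOException e) {
        // Likewise recoverable at this level.
      }
    }
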
[05/23] hbase git commit: HBASE-16014 Get and Put constructor
argument lists are divergent
Posted by sy...@apache.org.
HBASE-16014 Get and Put constructor argument lists are divergent
Signed-off-by: CHIA-PING TSAI <ch...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c8f02e4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c8f02e4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c8f02e4
Branch: refs/heads/hbase-12439
Commit: 9c8f02e4ef3037e8eaf649360ce83a898c3b20e1
Parents: a41b185
Author: brandboat <br...@gmail.com>
Authored: Sat Mar 18 18:02:42 2017 +0800
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Tue Mar 21 09:33:24 2017 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/client/Get.java | 22 ++++++++++++++++++++
.../org/apache/hadoop/hbase/client/TestGet.java | 12 +++++++++++
2 files changed, 34 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8f02e4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index a581ed5..3771aff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -130,6 +131,27 @@ public class Get extends Query
}
}
+ /**
+ * Create a Get operation for the specified row.
+ * @param row array containing the row key
+ * @param rowOffset offset of the row key within the array
+ * @param rowLength length of the row key
+ */
+ public Get(byte[] row, int rowOffset, int rowLength) {
+ Mutation.checkRow(row, rowOffset, rowLength);
+ this.row = Bytes.copy(row, rowOffset, rowLength);
+ }
+
+ /**
+ * Create a Get operation for the specified row.
+ * @param row row key as a ByteBuffer; the bytes between position and limit are copied
+ */
+ public Get(ByteBuffer row) {
+ Mutation.checkRow(row);
+ this.row = new byte[row.remaining()];
+ row.get(this.row);
+ }
+
public boolean isCheckExistenceOnly() {
return checkExistenceOnly;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8f02e4/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
index 810f6bc..6a2bb39 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.nio.ByteBuffer;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import java.util.List;
@@ -241,4 +242,15 @@ public class TestGet {
assertEquals("my.MockFilter", filters.get(1).getClass().getName());
assertTrue(filters.get(2) instanceof KeyOnlyFilter);
}
+
+ @Test
+ public void testGetRowConstructor() {
+ byte[] row1 = Bytes.toBytes("testRow");
+ byte[] row2 = Bytes.toBytes("testtestRow");
+ ByteBuffer rowBuffer = ByteBuffer.allocate(16);
+ rowBuffer = ByteBuffer.wrap(row1);
+ Get get1 = new Get(rowBuffer);
+ Get get2 = new Get(row2, 4, 7);
+ Assert.assertArrayEquals(get1.getRow(), get2.getRow());
+ }
}
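
A brief usage sketch of the two new constructors, mirroring the test above:

    byte[] backing = Bytes.toBytes("prefixtestRow");
    Get fromSlice = new Get(backing, 6, 7);   // copies the "testRow" slice

    ByteBuffer buf = ByteBuffer.wrap(Bytes.toBytes("testRow"));
    Get fromBuffer = new Get(buf);            // copies position()..limit()

    assert Arrays.equals(fromSlice.getRow(), fromBuffer.getRow());
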
[16/23] hbase git commit: HBASE-17669: Implement async
mergeRegion/splitRegion methods
Posted by sy...@apache.org.
HBASE-17669: Implement async mergeRegion/splitRegion methods
Signed-off-by: zhangduo <zh...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/faf81d51
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/faf81d51
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/faf81d51
Branch: refs/heads/hbase-12439
Commit: faf81d5133393656a21ce8614447e4bb2b0d04e3
Parents: f1c1f25
Author: huzheng <op...@gmail.com>
Authored: Thu Mar 2 11:26:20 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Fri Mar 24 21:25:24 2017 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/HRegionInfo.java | 14 ++
.../apache/hadoop/hbase/client/AsyncAdmin.java | 36 +++
.../hadoop/hbase/client/AsyncHBaseAdmin.java | 242 +++++++++++++++++++
.../apache/hadoop/hbase/client/HBaseAdmin.java | 15 +-
.../hbase/client/TestAsyncRegionAdminApi.java | 96 ++++++++
5 files changed, 389 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/faf81d51/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 045f866..b98d210 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.util.HashKey;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.util.StringUtils;
/**
* Information about a region. A region is a range of keys in the whole keyspace of a table, an
@@ -582,6 +583,19 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
return elements;
}
+ public static boolean isEncodedRegionName(byte[] regionName) throws IOException {
+ try {
+ HRegionInfo.parseRegionName(regionName);
+ return false;
+ } catch (IOException e) {
+ if (StringUtils.stringifyException(e)
+ .contains(HRegionInfo.INVALID_REGION_NAME_FORMAT_MESSAGE)) {
+ return true;
+ }
+ throw e;
+ }
+ }
+
/** @return the regionId */
public long getRegionId(){
return regionId;
http://git-wip-us.apache.org/repos/asf/hbase/blob/faf81d51/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 630ae47..5a13ede 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -364,4 +364,40 @@ public interface AsyncAdmin {
* @param hri
*/
CompletableFuture<Void> closeRegion(ServerName sn, HRegionInfo hri);
+
+ /**
+ * Merge two regions.
+ * @param nameOfRegionA encoded or full name of region a
+ * @param nameOfRegionB encoded or full name of region b
+ * @param forcible true if a compulsory merge should be done even when the regions are
+ * not adjacent; otherwise only two adjacent regions can be merged
+ */
+ CompletableFuture<Void> mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB,
+ final boolean forcible);
+
+ /**
+ * Split a table. The method will execute a split action for each region in the table.
+ * @param tableName table to split
+ */
+ CompletableFuture<Void> split(final TableName tableName);
+
+ /**
+ * Split an individual region.
+ * @param regionName region to split
+ */
+ CompletableFuture<Void> splitRegion(final byte[] regionName);
+
+ /**
+ * Split a table.
+ * @param tableName table to split
+ * @param splitPoint the explicit position to split on
+ */
+ CompletableFuture<Void> split(final TableName tableName, final byte[] splitPoint);
+
+ /**
+ * Split an individual region.
+ * @param regionName region to split
+ * @param splitPoint the explicit position to split on
+ */
+ CompletableFuture<Void> splitRegion(final byte[] regionName, final byte[] splitPoint);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/faf81d51/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index e2dc3d5..5ae30d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.regex.Pattern;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -35,6 +39,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
@@ -49,6 +54,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
import org.apache.hadoop.hbase.client.Scan.ReadType;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -56,6 +62,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
@@ -90,6 +98,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancer
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
@@ -670,6 +680,227 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
.serverName(sn).call();
}
+ private byte[] toEncodeRegionName(byte[] regionName) {
+ try {
+ return HRegionInfo.isEncodedRegionName(regionName) ? regionName
+ : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName));
+ } catch (IOException e) {
+ return regionName;
+ }
+ }
+
+ private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference<TableName> tableName,
+ CompletableFuture<TableName> result) {
+ getRegion(encodeRegionName).whenComplete((p, err) -> {
+ if (err != null) {
+ result.completeExceptionally(err);
+ return;
+ }
+ if (p == null) {
+ result.completeExceptionally(new UnknownRegionException(
+ "Can't invoke merge on unknown region " + Bytes.toStringBinary(encodeRegionName)));
+ return;
+ }
+ if (p.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+ result.completeExceptionally(
+ new IllegalArgumentException("Can't invoke merge on non-default regions directly"));
+ return;
+ }
+ if (!tableName.compareAndSet(null, p.getFirst().getTable())) {
+ if (!tableName.get().equals(p.getFirst().getTable())) {
+ // the tables of these two regions must be the same.
+ result.completeExceptionally(
+ new IllegalArgumentException("Cannot merge regions from two different tables "
+ + tableName.get() + " and " + p.getFirst().getTable()));
+ } else {
+ result.complete(tableName.get());
+ }
+ }
+ });
+ }
+
+ private CompletableFuture<TableName> checkRegionsAndGetTableName(byte[] encodeRegionNameA,
+ byte[] encodeRegionNameB) {
+ AtomicReference<TableName> tableNameRef = new AtomicReference<>();
+ CompletableFuture<TableName> future = new CompletableFuture<>();
+
+ checkAndGetTableName(encodeRegionNameA, tableNameRef, future);
+ checkAndGetTableName(encodeRegionNameB, tableNameRef, future);
+ return future;
+ }
+
+ @Override
+ public CompletableFuture<Void> mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB,
+ boolean forcible) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ final byte[] encodeRegionNameA = toEncodeRegionName(nameOfRegionA);
+ final byte[] encodeRegionNameB = toEncodeRegionName(nameOfRegionB);
+
+ checkRegionsAndGetTableName(encodeRegionNameA, encodeRegionNameB)
+ .whenComplete((tableName, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ return;
+ }
+
+ MergeTableRegionsRequest request = null;
+ try {
+ request = RequestConverter.buildMergeTableRegionsRequest(
+ new byte[][] { encodeRegionNameA, encodeRegionNameB }, forcible, ng.getNonceGroup(),
+ ng.newNonce());
+ } catch (DeserializationException e) {
+ future.completeExceptionally(e);
+ return;
+ }
+
+ this.<MergeTableRegionsRequest, MergeTableRegionsResponse> procedureCall(request,
+ (s, c, req, done) -> s.mergeTableRegions(c, req, done), (resp) -> resp.getProcId(),
+ new MergeTableRegionProcedureBiConsumer(this, tableName)).whenComplete((ret, err2) -> {
+ if (err2 != null) {
+ future.completeExceptionally(err2);
+ } else {
+ future.complete(ret);
+ }
+ });
+
+ });
+ return future;
+ }
+
+ @Override
+ public CompletableFuture<Void> split(TableName tableName) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ tableExists(tableName).whenComplete((exist, error) -> {
+ if (error != null) {
+ future.completeExceptionally(error);
+ return;
+ }
+ if (!exist) {
+ future.completeExceptionally(new TableNotFoundException(tableName));
+ return;
+ }
+ metaTable
+ .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)
+ .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION))
+ .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION)))
+ .whenComplete((results, err2) -> {
+ if (err2 != null) {
+ future.completeExceptionally(err2);
+ return;
+ }
+ if (results != null && !results.isEmpty()) {
+ List<CompletableFuture<Void>> splitFutures = new ArrayList<>();
+ for (Result r : results) {
+ if (r.isEmpty() || MetaTableAccessor.getHRegionInfo(r) == null) continue;
+ RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
+ if (rl != null) {
+ for (HRegionLocation h : rl.getRegionLocations()) {
+ if (h != null && h.getServerName() != null) {
+ HRegionInfo hri = h.getRegionInfo();
+ if (hri == null || hri.isSplitParent()
+ || hri.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
+ continue;
+ splitFutures.add(split(h.getServerName(), hri, null));
+ }
+ }
+ }
+ }
+ CompletableFuture
+ .allOf(splitFutures.toArray(new CompletableFuture<?>[splitFutures.size()]))
+ .whenComplete((ret, exception) -> {
+ if (exception != null) {
+ future.completeExceptionally(exception);
+ return;
+ }
+ future.complete(ret);
+ });
+ } else {
+ future.complete(null);
+ }
+ });
+ });
+ return future;
+ }
+
+ @Override
+ public CompletableFuture<Void> splitRegion(byte[] regionName) {
+ return splitRegion(regionName, null);
+ }
+
+ @Override
+ public CompletableFuture<Void> split(TableName tableName, byte[] splitPoint) {
+ CompletableFuture<Void> result = new CompletableFuture<>();
+ if (splitPoint == null) {
+ return failedFuture(new IllegalArgumentException("splitPoint can not be null."));
+ }
+ connection.getRegionLocator(tableName).getRegionLocation(splitPoint)
+ .whenComplete((loc, err) -> {
+ if (err != null) {
+ result.completeExceptionally(err);
+ } else if (loc == null || loc.getRegionInfo() == null) {
+ result.completeExceptionally(new IllegalArgumentException(
+ "Region does not found: rowKey=" + Bytes.toStringBinary(splitPoint)));
+ } else {
+ splitRegion(loc.getRegionInfo().getRegionName(), splitPoint)
+ .whenComplete((ret, err2) -> {
+ if (err2 != null) {
+ result.completeExceptionally(err2);
+ } else {
+ result.complete(ret);
+ }
+
+ });
+ }
+ });
+ return result;
+ }
+
+ @Override
+ public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ getRegion(regionName).whenComplete((p, err) -> {
+ if (p == null) {
+ future.completeExceptionally(
+ new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)));
+ return;
+ }
+ if (p.getFirst() != null && p.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+ future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. "
+ + "Replicas are auto-split when their primary is split."));
+ return;
+ }
+ if (p.getSecond() == null) {
+ future.completeExceptionally(
+ new NoServerForRegionException(Bytes.toStringBinary(regionName)));
+ return;
+ }
+ split(p.getSecond(), p.getFirst(), splitPoint).whenComplete((ret, err2) -> {
+ if (err2 != null) {
+ future.completeExceptionally(err2);
+ } else {
+ future.complete(ret);
+ }
+ });
+ });
+ return future;
+ }
+
+ @VisibleForTesting
+ public CompletableFuture<Void> split(final ServerName sn, final HRegionInfo hri,
+ byte[] splitPoint) {
+ if (hri.getStartKey() != null && splitPoint != null
+ && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
+ return failedFuture(
+ new IllegalArgumentException("should not give a splitkey which equals to startkey!"));
+ }
+ return this.<Void> newAdminCaller()
+ .action(
+ (controller, stub) -> this.<SplitRegionRequest, SplitRegionResponse, Void> adminCall(
+ controller, stub, ProtobufUtil.buildSplitRegionRequest(hri.getRegionName(), splitPoint),
+ (s, c, req, done) -> s.splitRegion(controller, req, done), resp -> null))
+ .serverName(sn).call();
+ }
+
private byte[][] getSplitKeys(byte[] startKey, byte[] endKey, int numRegions) {
if (numRegions < 3) {
throw new IllegalArgumentException("Must create at least three regions");
@@ -885,6 +1116,17 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
}
}
+ private class MergeTableRegionProcedureBiConsumer extends TableProcedureBiConsumer {
+
+ MergeTableRegionProcedureBiConsumer(AsyncAdmin admin, TableName tableName) {
+ super(admin, tableName);
+ }
+
+ String getOperationType() {
+ return "MERGE_REGIONS";
+ }
+ }
+
private CompletableFuture<Void> waitProcedureResult(CompletableFuture<Long> procFuture) {
CompletableFuture<Void> future = new CompletableFuture<>();
procFuture.whenComplete((procId, error) -> {
http://git-wip-us.apache.org/repos/asf/hbase/blob/faf81d51/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 1368038..155a272 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1497,19 +1497,6 @@ public class HBaseAdmin implements Admin {
});
}
- private boolean isEncodedRegionName(byte[] regionName) throws IOException {
- try {
- HRegionInfo.parseRegionName(regionName);
- return false;
- } catch (IOException e) {
- if (StringUtils.stringifyException(e)
- .contains(HRegionInfo.INVALID_REGION_NAME_FORMAT_MESSAGE)) {
- return true;
- }
- throw e;
- }
- }
-
/**
* Merge two regions. Synchronous operation.
* Note: It is not feasible to predict the length of merge.
@@ -1582,7 +1569,7 @@ public class HBaseAdmin implements Admin {
assert(nameofRegionsToMerge.length >= 2);
byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
for(int i = 0; i < nameofRegionsToMerge.length; i++) {
- encodedNameofRegionsToMerge[i] = isEncodedRegionName(nameofRegionsToMerge[i]) ?
+ encodedNameofRegionsToMerge[i] = HRegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ?
nameofRegionsToMerge[i] : HRegionInfo.encodeRegionName(nameofRegionsToMerge[i]).getBytes();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/faf81d51/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 383b28f..980e07a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -17,10 +17,12 @@
*/
package org.apache.hadoop.hbase.client;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -200,4 +202,98 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
}
}
+
+ @Test
+ public void testMergeRegions() throws Exception {
+ final TableName tableName = TableName.valueOf("testMergeRegions");
+ HColumnDescriptor cd = new HColumnDescriptor("d");
+ HTableDescriptor td = new HTableDescriptor(tableName);
+ td.addFamily(cd);
+ byte[][] splitRows = new byte[][] { "3".getBytes(), "6".getBytes() };
+ Admin syncAdmin = TEST_UTIL.getAdmin();
+ try {
+ TEST_UTIL.createTable(td, splitRows);
+ TEST_UTIL.waitTableAvailable(tableName);
+
+ List<HRegionInfo> tableRegions;
+ HRegionInfo regionA;
+ HRegionInfo regionB;
+
+ // merge with full name
+ tableRegions = syncAdmin.getTableRegions(tableName);
+ assertEquals(3, syncAdmin.getTableRegions(tableName).size());
+ regionA = tableRegions.get(0);
+ regionB = tableRegions.get(1);
+ admin.mergeRegions(regionA.getRegionName(), regionB.getRegionName(), false).get();
+
+ assertEquals(2, syncAdmin.getTableRegions(tableName).size());
+
+ // merge with encoded name
+ tableRegions = syncAdmin.getTableRegions(tableName);
+ regionA = tableRegions.get(0);
+ regionB = tableRegions.get(1);
+ admin.mergeRegions(regionA.getRegionName(), regionB.getRegionName(), false).get();
+
+ assertEquals(1, syncAdmin.getTableRegions(tableName).size());
+ } finally {
+ syncAdmin.disableTable(tableName);
+ syncAdmin.deleteTable(tableName);
+ }
+ }
+
+ @Test
+ public void testSplitTable() throws Exception {
+ splitTests(TableName.valueOf("testSplitTable"), 3000, false, null);
+ splitTests(TableName.valueOf("testSplitTableWithSplitPoint"), 3000, false, Bytes.toBytes("3"));
+ splitTests(TableName.valueOf("testSplitRegion"), 3000, true, null);
+ splitTests(TableName.valueOf("testSplitRegionWithSplitPoint"), 3000, true, Bytes.toBytes("3"));
+ }
+
+ private void splitTests(TableName tableName, int rowCount, boolean isSplitRegion,
+ byte[] splitPoint) throws Exception {
+ int count = 0;
+ // create table
+ HColumnDescriptor cd = new HColumnDescriptor("d");
+ HTableDescriptor td = new HTableDescriptor(tableName);
+ td.addFamily(cd);
+ Table table = TEST_UTIL.createTable(td, null);
+ TEST_UTIL.waitTableAvailable(tableName);
+
+ List<HRegionInfo> regions = TEST_UTIL.getAdmin().getTableRegions(tableName);
+ assertEquals(regions.size(), 1);
+
+ List<Put> puts = new ArrayList<>();
+ for (int i = 0; i < rowCount; i++) {
+ Put put = new Put(Bytes.toBytes(i));
+ put.addColumn(Bytes.toBytes("d"), null, Bytes.toBytes("value" + i));
+ puts.add(put);
+ }
+ table.put(puts);
+
+ if (isSplitRegion) {
+ admin.splitRegion(regions.get(0).getRegionName(), splitPoint).get();
+ } else {
+ if (splitPoint == null) {
+ admin.split(tableName).get();
+ } else {
+ admin.split(tableName, splitPoint).get();
+ }
+ }
+
+ for (int i = 0; i < 45; i++) {
+ try {
+ List<HRegionInfo> hRegionInfos = TEST_UTIL.getAdmin().getTableRegions(tableName);
+ count = hRegionInfos.size();
+ if (count >= 2) {
+ break;
+ }
+ Thread.sleep(1000L);
+ } catch (Exception e) {
+ LOG.error(e);
+ }
+ }
+
+ assertEquals(count, 2);
+ }
+
}
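
A short usage sketch of the new async methods (an AsyncAdmin instance 'admin'
and the region names are assumed):

    // Merge two adjacent regions; full or encoded names are both accepted.
    admin.mergeRegions(regionA.getRegionName(), regionB.getRegionName(), false).get();

    // Split every splittable region of a table, or one region at an explicit
    // point; each call returns a CompletableFuture<Void>.
    admin.split(tableName).get();
    admin.splitRegion(regionName, Bytes.toBytes("3")).get();
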
[03/23] hbase git commit: Added hbase high performance cookbook to
the book resources page on the website
Posted by sy...@apache.org.
Added hbase high performance cookbook to the book resources page on the website
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16900c8c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16900c8c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16900c8c
Branch: refs/heads/hbase-12439
Commit: 16900c8c25766456aeb624c19d50ee0c203facfa
Parents: e39e0e6
Author: Michael Stack <st...@apache.org>
Authored: Mon Mar 20 12:01:22 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Mon Mar 20 12:01:22 2017 -0700
----------------------------------------------------------------------
src/main/site/xdoc/resources.xml | 4 ++++
1 file changed, 4 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/16900c8c/src/main/site/xdoc/resources.xml
----------------------------------------------------------------------
diff --git a/src/main/site/xdoc/resources.xml b/src/main/site/xdoc/resources.xml
index d067c1e..078587c 100644
--- a/src/main/site/xdoc/resources.xml
+++ b/src/main/site/xdoc/resources.xml
@@ -37,6 +37,10 @@ under the License.
<p><a href="http://www.packtpub.com/hbase-administration-for-optimum-database-performance-cookbook/book">HBase Administration Cookbook</a> by Yifeng Jiang. Publisher: PACKT Publishing, Release: Expected August 2012, Pages: 335.</p>
</section>
</section>
+<section name="HBase High Performance Cookbook">
+ <p><a href="https://www.packtpub.com/big-data-and-business-intelligence/hbase-high-performance-cookbook">HBase High Performance Cookbook</a> by Ruchir Choudhry. Publisher: PACKT Publishing, Release: January 2017, Pages: 350.</p>
+</section>
+</section>
</section>
</body>
</document>
[21/23] hbase git commit: Reviving the merge of the compacting
pipeline: making the limit on the number of the segments in the pipeline
configurable, adding merge test, fixing bug in sizes counting
Posted by sy...@apache.org.
Reviving the merge of the compacting pipeline: making the limit on the number of the segments in the pipeline configurable, adding merge test, fixing bug in sizes counting
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c77e2135
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c77e2135
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c77e2135
Branch: refs/heads/hbase-12439
Commit: c77e2135db07b6417f5fea4577c2c7ae8d6d7008
Parents: 04fc455
Author: anastas <an...@yahoo-inc.com>
Authored: Mon Mar 27 15:41:32 2017 +0300
Committer: anastas <an...@yahoo-inc.com>
Committed: Mon Mar 27 15:41:32 2017 +0300
----------------------------------------------------------------------
.../hbase/regionserver/CompactingMemStore.java | 9 ++-
.../hbase/regionserver/CompactionPipeline.java | 8 +-
.../hbase/regionserver/ImmutableSegment.java | 4 +-
.../hbase/regionserver/MemStoreCompactor.java | 21 +++--
.../TestWalAndCompactingMemStoreFlush.java | 83 +++++++++++---------
5 files changed, 75 insertions(+), 50 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/c77e2135/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 26b2f49..0c56693 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -279,7 +279,8 @@ public class CompactingMemStore extends AbstractMemStore {
public boolean swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result,
boolean merge) {
- return pipeline.swap(versionedList, result, !merge);
+ // the last argument (true) means: update the region size
+ return pipeline.swap(versionedList, result, !merge, true);
}
/**
@@ -437,7 +438,8 @@ public class CompactingMemStore extends AbstractMemStore {
private void pushTailToSnapshot() {
VersionedSegmentsList segments = pipeline.getVersionedTail();
pushToSnapshot(segments.getStoreSegments());
- pipeline.swap(segments,null,false); // do not close segments as they are in snapshot now
+ // In this swap: don't close segments (they are in snapshot now) and don't update the region size
+ pipeline.swap(segments, null, false, false);
}
private void pushPipelineToSnapshot() {
@@ -449,7 +451,8 @@ public class CompactingMemStore extends AbstractMemStore {
pushToSnapshot(segments.getStoreSegments());
// swap can return false in case the pipeline was updated by ongoing compaction
// and the version increased; the chance of this happening is very low
- done = pipeline.swap(segments, null, false); // don't close segments; they are in snapshot now
+ // In this swap: don't close segments (they are in snapshot now) and don't update the region size
+ done = pipeline.swap(segments, null, false, false);
if (iterationsCnt>2) {
// in practice it is impossible for this loop to iterate more than twice
// (because compaction is stopped and nothing restarts it while a snapshot is requested),
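The pushPipelineToSnapshot() retry loop above is an optimistic-concurrency pattern: read a versioned view of the pipeline, then the swap succeeds only if no concurrent in-memory compaction advanced the version in between. A minimal, self-contained sketch of that pattern, with simplified stand-ins for VersionedSegmentsList and CompactionPipeline.swap (illustrative names, not the real HBase API):

import java.util.LinkedList;
import java.util.List;

class VersionedPipeline<T> {
  private final LinkedList<T> pipeline = new LinkedList<>();
  private long version = 0;

  // An immutable view of the pipeline plus the version it was taken at.
  static final class View<E> {
    final List<E> items;
    final long version;
    View(List<E> items, long version) { this.items = items; this.version = version; }
  }

  synchronized void push(T segment) {
    pipeline.addFirst(segment);
    version++;
  }

  synchronized View<T> read() {
    return new View<>(new LinkedList<>(pipeline), version);
  }

  // Remove the viewed segments; fail if the pipeline changed since read().
  synchronized boolean trySwapOut(View<T> view) {
    if (view.version != version) {
      return false;  // a concurrent writer got in between; caller retries
    }
    pipeline.removeAll(view.items);
    version++;       // invalidate any other outstanding views
    return true;
  }
}

A caller drains the pipeline with while (!p.trySwapOut(p.read())) { }; as the comment in the real code notes, the retry practically never runs more than twice.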
http://git-wip-us.apache.org/repos/asf/hbase/blob/c77e2135/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index e64c0fb..06e83a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -106,12 +106,16 @@ public class CompactionPipeline {
* removed.
* @param closeSuffix whether to close the suffix (to release memory), as part of swapping it out
* During index merge op this will be false and for compaction it will be true.
+ * @param updateRegionSize whether to update the region size. Update the region size,
+ * when the pipeline is swapped as part of in-memory-flush and
+ * further merge/compaction. Don't update the region size when the
+ * swap is the result of a snapshot (flush-to-disk).
* @return true iff swapped tail with new segment
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="VO_VOLATILE_INCREMENT",
justification="Increment is done under a synchronize block so safe")
public boolean swap(VersionedSegmentsList versionedList, ImmutableSegment segment,
- boolean closeSuffix) {
+ boolean closeSuffix, boolean updateRegionSize) {
if (versionedList.getVersion() != version) {
return false;
}
@@ -135,7 +139,7 @@ public class CompactionPipeline {
readOnlyCopy = new LinkedList<>(pipeline);
version++;
}
- if (closeSuffix && region != null) {
+ if (updateRegionSize && region != null) {
// update the global memstore size counter
long suffixDataSize = getSegmentsKeySize(suffix);
long newDataSize = 0;
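The javadoc above now separates two concerns that previously rode on one flag: releasing the swapped-out suffix versus refreshing the region's memstore size. As a quick decision table over this commit's call sites (illustrative enum and method names, not HBase API):

public class SwapFlags {
  // The three situations in which CompactionPipeline.swap is called here.
  enum Reason { COMPACTION, MERGE, SNAPSHOT }

  // closeSuffix: only a compaction may release the old segments;
  // a merge reuses their cells, and a snapshot still owns them.
  static boolean closeSuffix(Reason r) { return r == Reason.COMPACTION; }

  // updateRegionSize: in-memory flush and merge/compaction adjust the
  // region size; a snapshot (flush-to-disk) accounts for it elsewhere.
  static boolean updateRegionSize(Reason r) { return r != Reason.SNAPSHOT; }

  public static void main(String[] args) {
    for (Reason r : Reason.values()) {
      System.out.println(r + ": closeSuffix=" + closeSuffix(r)
          + ", updateRegionSize=" + updateRegionSize(r));
    }
  }
}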
http://git-wip-us.apache.org/repos/asf/hbase/blob/c77e2135/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index f1273a9..19b66b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -99,7 +99,7 @@ public class ImmutableSegment extends Segment {
super(null, // initialize the CellSet with NULL
comparator, memStoreLAB);
this.type = type;
- // build the true CellSet based on CellArrayMap
+ // build the new CellSet based on CellArrayMap
CellSet cs = createCellArrayMapSet(numOfCells, iterator, merge);
this.setCellSet(null, cs); // update the CellSet of the new Segment
@@ -203,7 +203,7 @@ public class ImmutableSegment extends Segment {
cells[i] = maybeCloneWithAllocator(c);
}
boolean useMSLAB = (getMemStoreLAB()!=null);
- // second parameter true, because in compaction addition of the cell to new segment
+ // the second parameter is true, because in compaction/merge the addition of a cell to the new segment
// is always successful
updateMetaInfo(c, true, useMSLAB, null); // updates the size per cell
i++;
http://git-wip-us.apache.org/repos/asf/hbase/blob/c77e2135/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index dfa7d18..0d3f47e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -44,22 +44,26 @@ import java.util.concurrent.atomic.AtomicBoolean;
@InterfaceAudience.Private
public class MemStoreCompactor {
+ // The upper bound for the number of segments we store in the pipeline prior to merging.
+ // This default is subject to further experimentation.
+ // The external configuration key for tuning the compacting MemStore behaviour
+ public static final String COMPACTING_MEMSTORE_THRESHOLD_KEY =
+ "hbase.hregion.compacting.pipeline.segments.limit";
+ // the default stays the same ("infinity"), but is now configurable
+ public static final int COMPACTING_MEMSTORE_THRESHOLD_DEFAULT = 30;
+
public static final long DEEP_OVERHEAD = ClassSize
.align(ClassSize.OBJECT
+ 4 * ClassSize.REFERENCE
// compactingMemStore, versionedList, action, isInterrupted (the reference)
// "action" is an enum and thus it is a class with static final constants,
// so counting only the size of the reference to it and not the size of the internals
- + Bytes.SIZEOF_INT // compactionKVMax
+ + 2 * Bytes.SIZEOF_INT // compactionKVMax, pipelineThreshold
+ ClassSize.ATOMIC_BOOLEAN // isInterrupted (the internals)
);
- // The upper bound for the number of segments we store in the pipeline prior to merging.
- // This constant is subject to further experimentation.
- private static final int THRESHOLD_PIPELINE_SEGMENTS = 30; // stands here for infinity
-
private static final Log LOG = LogFactory.getLog(MemStoreCompactor.class);
-
+ private final int pipelineThreshold; // the limit on the number of segments in the pipeline
private CompactingMemStore compactingMemStore;
// a static version of the segment list from the pipeline
@@ -91,6 +95,9 @@ public class MemStoreCompactor {
this.compactionKVMax = compactingMemStore.getConfiguration()
.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
initiateAction(compactionPolicy);
+ pipelineThreshold = // get the limit on the number of segments in the pipeline
+ compactingMemStore.getConfiguration().getInt(COMPACTING_MEMSTORE_THRESHOLD_KEY,
+ COMPACTING_MEMSTORE_THRESHOLD_DEFAULT);
}
/**----------------------------------------------------------------------
@@ -161,7 +168,7 @@ public class MemStoreCompactor {
// compaction shouldn't happen or isn't worth it
// limit the number of the segments in the pipeline
int numOfSegments = versionedList.getNumOfSegments();
- if (numOfSegments > THRESHOLD_PIPELINE_SEGMENTS) {
+ if (numOfSegments > pipelineThreshold) {
LOG.debug("In-Memory Compaction Pipeline for store " + compactingMemStore.getFamilyName()
+ " is going to be merged, as there are " + numOfSegments + " segments");
return Action.MERGE; // to avoid too many segments, merge now
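Since the threshold is now read from the store's Configuration, it can be lowered from the effectively-infinite default of 30 to trigger merges sooner. A hedged sketch of setting it programmatically (the key and default come from the diff above; the value 4 is an arbitrary illustration, not a recommendation):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PipelineLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // merge the in-memory pipeline once it holds more than 4 segments
    conf.setInt("hbase.hregion.compacting.pipeline.segments.limit", 4);
    System.out.println("pipeline segments limit = "
        + conf.getInt("hbase.hregion.compacting.pipeline.segments.limit", 30));
  }
}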
http://git-wip-us.apache.org/repos/asf/hbase/blob/c77e2135/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 57eee30..aae0a4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -756,22 +756,24 @@ public class TestWalAndCompactingMemStoreFlush {
}
@Test(timeout = 180000)
- public void testSelectiveFlushAndWALinIndexCompaction() throws IOException {
+ public void testSelectiveFlushWithBasicAndMerge() throws IOException {
// Set up the configuration
Configuration conf = HBaseConfiguration.create();
- conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024);
+ conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
FlushNonSloppyStoresFirstPolicy.class.getName());
conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
- 200 * 1024);
+ 75 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
- // set memstore to do data compaction and not to use the speculative scan
+ // set memstore to do index compaction with merge
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(MemoryCompactionPolicy.BASIC));
+ // pipeline length that triggers a merge
+ conf.setInt(MemStoreCompactor.COMPACTING_MEMSTORE_THRESHOLD_KEY, 1);
// Initialize the HRegion
- HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
- // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
+ HRegion region = initHRegion("testSelectiveFlushWithBasicAndMerge", conf);
+ // Add 1200 entries for CF1 (CompactingMemStore), 100 for CF2 (DefaultMemStore) and 50 for CF3
for (int i = 1; i <= 1200; i++) {
region.put(createPut(1, i));
if (i <= 100) {
@@ -781,7 +783,7 @@ public class TestWalAndCompactingMemStoreFlush {
}
}
}
- // Now add more puts for CF2, so that we only flush CF2 to disk
+ // Now put more entries into CF2
for (int i = 100; i < 2000; i++) {
region.put(createPut(2, i));
}
@@ -800,13 +802,14 @@ public class TestWalAndCompactingMemStoreFlush {
// The total memstore size should be the same as the sum of the sizes of
// memstores of CF1, CF2 and CF3.
- assertEquals(totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize()
- + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
+ assertEquals(totalMemstoreSize,
+ cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize()
+ + cf3MemstoreSizePhaseI.getDataSize());
- // Flush!
+ // Initiate in-memory Flush!
((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).flushInMemory();
((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).flushInMemory();
- // CF1 and CF3 should be compacted so wait here to be sure the compaction is done
+ // CF1 and CF3 should be flattened and merged, so wait here to be sure the merge is done
while (((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore)
.isMemStoreFlushingInMemory()) {
Threads.sleep(10);
@@ -815,21 +818,22 @@ public class TestWalAndCompactingMemStoreFlush {
.isMemStoreFlushingInMemory()) {
Threads.sleep(10);
}
+
+ // Flush-to-disk! Only CF2 should be flushed
region.flush(false);
+ MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore();
MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
+ MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
- long smallestSeqInRegionCurrentMemstorePhaseII = region.getWAL()
- .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
- long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1);
- long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2);
- long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
-
+ // CF1 should be flushed in memory and just flattened, so CF1 heap overhead should be smaller
+ assertTrue(cf1MemstoreSizePhaseI.getHeapSize() > cf1MemstoreSizePhaseII.getHeapSize());
+ // CF1 should be flushed in memory and just flattened, so CF1 data size should remain the same
+ assertEquals(cf1MemstoreSizePhaseI.getDataSize(), cf1MemstoreSizePhaseII.getDataSize());
// CF2 should have been cleared
assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
- assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
- // Add same entries to compact them later
+ // Add the same number of entries to observe the merge
for (int i = 1; i <= 1200; i++) {
region.put(createPut(1, i));
if (i <= 100) {
@@ -844,16 +848,12 @@ public class TestWalAndCompactingMemStoreFlush {
region.put(createPut(2, i));
}
- long smallestSeqInRegionCurrentMemstorePhaseIII = region.getWAL()
- .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
- long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1);
- long smallestSeqCF2PhaseIII = region.getOldestSeqIdOfStore(FAMILY2);
- long smallestSeqCF3PhaseIII = region.getOldestSeqIdOfStore(FAMILY3);
+ MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getSizeOfMemStore();
- // Flush!
+ // Flush in memory!
((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).flushInMemory();
((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).flushInMemory();
- // CF1 and CF3 should be compacted so wait here to be sure the compaction is done
+ // CF1 and CF3 should be merged, so wait here to be sure the merge is done
while (((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore)
.isMemStoreFlushingInMemory()) {
Threads.sleep(10);
@@ -864,17 +864,28 @@ public class TestWalAndCompactingMemStoreFlush {
}
region.flush(false);
- long smallestSeqInRegionCurrentMemstorePhaseIV = region.getWAL()
- .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
- long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1);
- long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2);
- long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3);
+ MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getSizeOfMemStore();
+ MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
- // now check that the LSN of the entire WAL, of CF1 and of CF3 has NOT progressed due to merge
- assertFalse(
- smallestSeqInRegionCurrentMemstorePhaseIV > smallestSeqInRegionCurrentMemstorePhaseIII);
- assertFalse(smallestSeqCF1PhaseIV > smallestSeqCF1PhaseIII);
- assertFalse(smallestSeqCF3PhaseIV > smallestSeqCF3PhaseIII);
+ assertEquals(2*cf1MemstoreSizePhaseI.getDataSize(), cf1MemstoreSizePhaseIV.getDataSize());
+ assertEquals(
+ cf1MemstoreSizePhaseI.getHeapSize() - cf1MemstoreSizePhaseII.getHeapSize(),
+ cf1MemstoreSizePhaseIII.getHeapSize() - cf1MemstoreSizePhaseIV.getHeapSize());
+ assertEquals(3, // active, one in pipeline, snapshot
+ ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).getSegments().size());
+ // CF2 should have been cleared
+ assertEquals("\n<<< DEBUG: The data--heap sizes of stores before/after first flushes,"
+ + " CF1: " + cf1MemstoreSizePhaseI.getDataSize() + "/" + cf1MemstoreSizePhaseII
+ .getDataSize() + "--" + cf1MemstoreSizePhaseI.getHeapSize() + "/" + cf1MemstoreSizePhaseII
+ .getHeapSize() + ", CF2: " + cf2MemstoreSizePhaseI.getDataSize() + "/"
+ + cf2MemstoreSizePhaseII.getDataSize() + "--" + cf2MemstoreSizePhaseI.getHeapSize() + "/"
+ + cf2MemstoreSizePhaseII.getHeapSize() + ", CF3: " + cf3MemstoreSizePhaseI.getDataSize()
+ + "/" + cf3MemstoreSizePhaseII.getDataSize() + "--" + cf3MemstoreSizePhaseI.getHeapSize()
+ + "/" + cf3MemstoreSizePhaseII.getHeapSize() + "\n<<< AND before/after second flushes "
+ + " CF1: " + cf1MemstoreSizePhaseIII.getDataSize() + "/" + cf1MemstoreSizePhaseIV
+ .getDataSize() + "--" + cf1MemstoreSizePhaseIII.getHeapSize() + "/" + cf1MemstoreSizePhaseIV
+ .getHeapSize() + "\n",
+ 0, cf2MemstoreSizePhaseIV.getDataSize());
HBaseTestingUtility.closeRegionAndWAL(region);
}
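For quick reference, the configuration the new test uses to drive the merge path, collected from the hunks above (the sizes are the test's own tuning for a small region, not production recommendations):

Configuration conf = HBaseConfiguration.create();
conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
    FlushNonSloppyStoresFirstPolicy.class.getName());
conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
    75 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
    String.valueOf(MemoryCompactionPolicy.BASIC));
// with limit 1, a pipeline of two segments already qualifies for a merge
conf.setInt(MemStoreCompactor.COMPACTING_MEMSTORE_THRESHOLD_KEY, 1);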
[13/23] hbase git commit: HBASE-17809 cleanup unused class
Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe3c32eb/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java
deleted file mode 100644
index 0efa6da..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.*;
-
-import java.util.Iterator;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({MiscTests.class, SmallTests.class})
-public class TestSortedCopyOnWriteSet {
-
- @Test
- public void testSorting() throws Exception {
- SortedCopyOnWriteSet<String> set = new SortedCopyOnWriteSet<>();
- set.add("c");
- set.add("d");
- set.add("a");
- set.add("b");
-
- String[] expected = new String[]{"a", "b", "c", "d"};
- String[] stored = set.toArray(new String[4]);
- assertArrayEquals(expected, stored);
-
- set.add("c");
- assertEquals(4, set.size());
- stored = set.toArray(new String[4]);
- assertArrayEquals(expected, stored);
- }
-
- @Test
- public void testIteratorIsolation() throws Exception {
- SortedCopyOnWriteSet<String> set = new SortedCopyOnWriteSet<>(Lists.newArrayList("a", "b", "c", "d", "e"));
-
- // isolation of remove()
- Iterator<String> iter = set.iterator();
- set.remove("c");
- boolean found = false;
- while (iter.hasNext() && !found) {
- found = "c".equals(iter.next());
- }
- assertTrue(found);
-
- iter = set.iterator();
- found = false;
- while (iter.hasNext() && !found) {
- found = "c".equals(iter.next());
- }
- assertFalse(found);
-
- // isolation of add()
- iter = set.iterator();
- set.add("f");
- found = false;
- while (iter.hasNext() && !found) {
- String next = iter.next();
- found = "f".equals(next);
- }
- assertFalse(found);
-
- // isolation of addAll()
- iter = set.iterator();
- set.addAll(Lists.newArrayList("g", "h", "i"));
- found = false;
- while (iter.hasNext() && !found) {
- String next = iter.next();
- found = "g".equals(next) || "h".equals(next) || "i".equals(next);
- }
- assertFalse(found);
-
- // isolation of clear()
- iter = set.iterator();
- set.clear();
- assertEquals(0, set.size());
- int size = 0;
- while (iter.hasNext()) {
- iter.next();
- size++;
- }
- assertTrue(size > 0);
- }
-
-}
-
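The removed test exercised two properties of SortedCopyOnWriteSet at once: sorted order and snapshot ("isolated") iterators. Should similar behaviour be needed again, the JDK covers each half separately; a minimal sketch using standard java.util.concurrent classes (not an HBase API):

import java.util.Iterator;
import java.util.concurrent.CopyOnWriteArraySet;

public class SnapshotIteratorExample {
  public static void main(String[] args) {
    // CopyOnWriteArraySet iterators see a snapshot of the set taken at
    // creation time, matching the isolation the deleted test asserted
    // (but without sorted order; for sorting, ConcurrentSkipListSet keeps
    // elements ordered at the cost of only weakly-consistent iterators).
    CopyOnWriteArraySet<String> set = new CopyOnWriteArraySet<>();
    set.add("a");
    set.add("b");
    set.add("c");
    Iterator<String> iter = set.iterator();
    set.remove("b");              // the live iterator is unaffected
    StringBuilder seen = new StringBuilder();
    while (iter.hasNext()) {
      seen.append(iter.next());
    }
    System.out.println(seen);     // prints: abc
  }
}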