You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by an...@apache.org on 2015/07/21 05:38:50 UTC
[3/3] hbase git commit: HBASE-13954 Remove
HTableInterface#getRowOrBefore related server side code. (Ashish)
HBASE-13954 Remove HTableInterface#getRowOrBefore related server side code. (Ashish)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b6db268
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b6db268
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b6db268
Branch: refs/heads/master
Commit: 3b6db2686380631027975c46b4a73478e299c82f
Parents: cceee1b
Author: anoopsjohn <an...@gmail.com>
Authored: Tue Jul 21 09:08:26 2015 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Tue Jul 21 09:08:26 2015 +0530
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/client/Get.java | 14 +-
.../org/apache/hadoop/hbase/client/Scan.java | 18 -
.../hadoop/hbase/protobuf/ProtobufUtil.java | 36 +-
.../hadoop/hbase/protobuf/RequestConverter.java | 30 -
.../hbase/client/TestClientNoCluster.java | 8 -
.../org/apache/hadoop/hbase/client/TestGet.java | 3 -
.../hbase/protobuf/generated/ClientProtos.java | 395 ++---
hbase-protocol/src/main/protobuf/Client.proto | 7 +-
.../hadoop/hbase/rest/client/RemoteHTable.java | 4 -
.../hadoop/hbase/client/HTableWrapper.java | 14 -
.../hbase/coprocessor/BaseRegionObserver.java | 34 +-
.../hbase/coprocessor/RegionObserver.java | 84 +-
.../hbase/regionserver/DefaultMemStore.java | 118 +-
.../GetClosestRowBeforeTracker.java | 242 ---
.../hadoop/hbase/regionserver/HRegion.java | 32 -
.../hadoop/hbase/regionserver/HStore.java | 148 --
.../hadoop/hbase/regionserver/MemStore.java | 7 -
.../hbase/regionserver/RSRpcServices.java | 38 +-
.../hadoop/hbase/regionserver/Region.java | 11 -
.../regionserver/RegionCoprocessorHost.java | 35 -
.../apache/hadoop/hbase/regionserver/Store.java | 14 -
.../hbase/security/access/AccessController.java | 23 -
.../hadoop/hbase/HBaseTestingUtility.java | 25 +
.../hadoop/hbase/client/TestFromClientSide.java | 60 +-
.../hbase/client/TestFromClientSideNoCodec.java | 1 -
.../hbase/coprocessor/SimpleRegionObserver.java | 36 -
.../regionserver/TestGetClosestAtOrBefore.java | 43 +-
.../hbase/regionserver/TestMinVersions.java | 8 +-
.../access/TestWithDisabledAuthorization.java | 10 -
.../hadoop/hbase/thrift/ThriftServerRunner.java | 20 +-
.../hadoop/hbase/thrift/generated/Hbase.java | 1641 ++----------------
.../org/apache/hadoop/hbase/thrift/Hbase.thrift | 16 -
32 files changed, 462 insertions(+), 2713 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index abf6b1c..c71ee0d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -110,7 +110,6 @@ public class Get extends Query
this.storeOffset = get.getRowOffsetPerColumnFamily();
this.tr = get.getTimeRange();
this.checkExistenceOnly = get.isCheckExistenceOnly();
- this.closestRowBefore = get.isClosestRowBefore();
Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
byte [] fam = entry.getKey();
@@ -137,12 +136,23 @@ public class Get extends Query
return this;
}
+ /**
+ * This will always return the default value which is false as client cannot set the value to this
+ * property any more.
+ * @deprecated since 2.0.0 and will be removed in 3.0.0
+ */
+ @Deprecated
public boolean isClosestRowBefore() {
return closestRowBefore;
}
+ /**
+ * This is not used any more and does nothing. Use reverse scan instead.
+ * @deprecated since 2.0.0 and will be removed in 3.0.0
+ */
+ @Deprecated
public Get setClosestRowBefore(boolean closestRowBefore) {
- this.closestRowBefore = closestRowBefore;
+ // do Nothing
return this;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 14b721b..9d46bc7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -938,24 +938,6 @@ public class Scan extends Query {
}
/**
- * Utility that creates a Scan that will do a small scan in reverse from passed row
- * looking for next closest row.
- * @param row
- * @param family
- * @return An instance of Scan primed with passed <code>row</code> and <code>family</code> to
- * scan in reverse for one row only.
- */
- static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
- // Below does not work if you add in family; need to add the family qualifier that is highest
- // possible family qualifier. Do we have such a notion? Would have to be magic.
- Scan scan = new Scan(row);
- scan.setSmall(true);
- scan.setReversed(true);
- scan.setCaching(1);
- return scan;
- }
-
- /**
* Enable collection of {@link ScanMetrics}. For advanced users.
* @param enabled Set to true to enable accumulating scan metrics
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index b72f0bb..36f6fb5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -97,7 +97,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
@@ -122,12 +121,12 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.quotas.QuotaScope;
import org.apache.hadoop.hbase.quotas.QuotaType;
@@ -489,9 +488,6 @@ public final class ProtobufUtil {
if (proto.hasExistenceOnly() && proto.getExistenceOnly()){
get.setCheckExistenceOnly(true);
}
- if (proto.hasClosestRowBefore() && proto.getClosestRowBefore()){
- get.setClosestRowBefore(true);
- }
if (proto.hasConsistency()) {
get.setConsistency(toConsistency(proto.getConsistency()));
}
@@ -1077,9 +1073,6 @@ public final class ProtobufUtil {
if (get.isCheckExistenceOnly()){
builder.setExistenceOnly(true);
}
- if (get.isClosestRowBefore()){
- builder.setClosestRowBefore(true);
- }
if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) {
builder.setConsistency(toConsistency(get.getConsistency()));
}
@@ -1550,33 +1543,6 @@ public final class ProtobufUtil {
// Start helpers for Client
/**
- * A helper to get a row of the closest one before using client protocol.
- *
- * @param client
- * @param regionName
- * @param row
- * @param family
- * @return the row or the closestRowBefore if it doesn't exist
- * @throws IOException
- * @deprecated since 0.99 - use reversed scanner instead.
- */
- @Deprecated
- public static Result getRowOrBefore(final ClientService.BlockingInterface client,
- final byte[] regionName, final byte[] row,
- final byte[] family) throws IOException {
- GetRequest request =
- RequestConverter.buildGetRowOrBeforeRequest(
- regionName, row, family);
- try {
- GetResponse response = client.get(null, request);
- if (!response.hasResult()) return null;
- return toResult(response.getResult());
- } catch (ServiceException se) {
- throw getRemoteException(se);
- }
- }
-
- /**
* A helper to bulk load a list of HFiles using client protocol.
*
* @param client
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 96260fd..c111be2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionReques
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
@@ -123,35 +122,6 @@ public final class RequestConverter {
// Start utilities for Client
-/**
- * Create a new protocol buffer GetRequest to get a row, all columns in a family.
- * If there is no such row, return the closest row before it.
- *
- * @param regionName the name of the region to get
- * @param row the row to get
- * @param family the column family to get
- * should return the immediate row before
- * @return a protocol buffer GetRequest
- */
- public static GetRequest buildGetRowOrBeforeRequest(
- final byte[] regionName, final byte[] row, final byte[] family) {
- GetRequest.Builder builder = GetRequest.newBuilder();
- RegionSpecifier region = buildRegionSpecifier(
- RegionSpecifierType.REGION_NAME, regionName);
- builder.setRegion(region);
-
- Column.Builder columnBuilder = Column.newBuilder();
- columnBuilder.setFamily(ByteStringer.wrap(family));
- ClientProtos.Get.Builder getBuilder =
- ClientProtos.Get.newBuilder();
- getBuilder.setRow(ByteStringer.wrap(row));
- getBuilder.addColumn(columnBuilder.build());
- getBuilder.setClosestRowBefore(true);
- builder.setGet(getBuilder.build());
- return builder.build();
- }
-
-
/**
* Create a protocol buffer GetRequest for a client Get
*
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 66a80b0..e941440 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -514,14 +514,6 @@ public class TestClientNoCluster extends Configured implements Tool {
ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
ByteString row = request.getGet().getRow();
Pair<HRegionInfo, ServerName> p = meta.get(row.toByteArray());
- if (p == null) {
- if (request.getGet().getClosestRowBefore()) {
- byte [] bytes = row.toByteArray();
- SortedMap<byte [], Pair<HRegionInfo, ServerName>> head =
- bytes != null? meta.headMap(bytes): meta;
- p = head == null? null: head.get(head.lastKey());
- }
- }
if (p != null) {
resultBuilder.addCell(getRegionInfo(row, p.getFirst()));
resultBuilder.addCell(getServer(row, p.getSecond()));
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
index 0e18ecf..f370751 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
@@ -168,7 +168,6 @@ public class TestGet {
get.setReplicaId(2);
get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
get.setCheckExistenceOnly(true);
- get.setClosestRowBefore(true);
get.setTimeRange(3, 4);
get.setMaxVersions(11);
get.setMaxResultsPerColumnFamily(10);
@@ -191,9 +190,7 @@ public class TestGet {
// from Get class
assertEquals(get.isCheckExistenceOnly(), copyGet.isCheckExistenceOnly());
- assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore());
assertTrue(get.getTimeRange().equals(copyGet.getTimeRange()));
- assertEquals(get.isClosestRowBefore(), copyGet.isClosestRowBefore());
assertEquals(get.getMaxVersions(), copyGet.getMaxVersions());
assertEquals(get.getMaxResultsPerColumnFamily(), copyGet.getMaxResultsPerColumnFamily());
assertEquals(get.getRowOffsetPerColumnFamily(), copyGet.getRowOffsetPerColumnFamily());
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index c90a625..c4b1eec 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -1926,26 +1926,6 @@ public final class ClientProtos {
*/
boolean getExistenceOnly();
- // optional bool closest_row_before = 11 [default = false];
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- boolean hasClosestRowBefore();
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- boolean getClosestRowBefore();
-
// optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
/**
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
@@ -1963,8 +1943,7 @@ public final class ClientProtos {
**
* The protocol buffer version of Get.
* Unless existence_only is specified, return all the requested data
- * for the row that matches exactly, or the one that immediately
- * precedes it if closest_row_before is specified.
+ * for the row that matches exactly.
* </pre>
*/
public static final class Get extends
@@ -2087,18 +2066,13 @@ public final class ClientProtos {
existenceOnly_ = input.readBool();
break;
}
- case 88: {
- bitField0_ |= 0x00000100;
- closestRowBefore_ = input.readBool();
- break;
- }
case 96: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(12, rawValue);
} else {
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000100;
consistency_ = value;
}
break;
@@ -2371,32 +2345,6 @@ public final class ClientProtos {
return existenceOnly_;
}
- // optional bool closest_row_before = 11 [default = false];
- public static final int CLOSEST_ROW_BEFORE_FIELD_NUMBER = 11;
- private boolean closestRowBefore_;
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- public boolean hasClosestRowBefore() {
- return ((bitField0_ & 0x00000100) == 0x00000100);
- }
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- public boolean getClosestRowBefore() {
- return closestRowBefore_;
- }
-
// optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
public static final int CONSISTENCY_FIELD_NUMBER = 12;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_;
@@ -2404,7 +2352,7 @@ public final class ClientProtos {
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
*/
public boolean hasConsistency() {
- return ((bitField0_ & 0x00000200) == 0x00000200);
+ return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
@@ -2424,7 +2372,6 @@ public final class ClientProtos {
storeLimit_ = 0;
storeOffset_ = 0;
existenceOnly_ = false;
- closestRowBefore_ = false;
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
}
private byte memoizedIsInitialized = -1;
@@ -2492,9 +2439,6 @@ public final class ClientProtos {
output.writeBool(10, existenceOnly_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
- output.writeBool(11, closestRowBefore_);
- }
- if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeEnum(12, consistency_.getNumber());
}
getUnknownFields().writeTo(output);
@@ -2548,10 +2492,6 @@ public final class ClientProtos {
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(11, closestRowBefore_);
- }
- if (((bitField0_ & 0x00000200) == 0x00000200)) {
- size += com.google.protobuf.CodedOutputStream
.computeEnumSize(12, consistency_.getNumber());
}
size += getUnknownFields().getSerializedSize();
@@ -2621,11 +2561,6 @@ public final class ClientProtos {
result = result && (getExistenceOnly()
== other.getExistenceOnly());
}
- result = result && (hasClosestRowBefore() == other.hasClosestRowBefore());
- if (hasClosestRowBefore()) {
- result = result && (getClosestRowBefore()
- == other.getClosestRowBefore());
- }
result = result && (hasConsistency() == other.hasConsistency());
if (hasConsistency()) {
result = result &&
@@ -2684,10 +2619,6 @@ public final class ClientProtos {
hash = (37 * hash) + EXISTENCE_ONLY_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getExistenceOnly());
}
- if (hasClosestRowBefore()) {
- hash = (37 * hash) + CLOSEST_ROW_BEFORE_FIELD_NUMBER;
- hash = (53 * hash) + hashBoolean(getClosestRowBefore());
- }
if (hasConsistency()) {
hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getConsistency());
@@ -2770,8 +2701,7 @@ public final class ClientProtos {
**
* The protocol buffer version of Get.
* Unless existence_only is specified, return all the requested data
- * for the row that matches exactly, or the one that immediately
- * precedes it if closest_row_before is specified.
+ * for the row that matches exactly.
* </pre>
*/
public static final class Builder extends
@@ -2849,10 +2779,8 @@ public final class ClientProtos {
bitField0_ = (bitField0_ & ~0x00000100);
existenceOnly_ = false;
bitField0_ = (bitField0_ & ~0x00000200);
- closestRowBefore_ = false;
- bitField0_ = (bitField0_ & ~0x00000400);
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
- bitField0_ = (bitField0_ & ~0x00000800);
+ bitField0_ = (bitField0_ & ~0x00000400);
return this;
}
@@ -2942,10 +2870,6 @@ public final class ClientProtos {
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000100;
}
- result.closestRowBefore_ = closestRowBefore_;
- if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
- to_bitField0_ |= 0x00000200;
- }
result.consistency_ = consistency_;
result.bitField0_ = to_bitField0_;
onBuilt();
@@ -3039,9 +2963,6 @@ public final class ClientProtos {
if (other.hasExistenceOnly()) {
setExistenceOnly(other.getExistenceOnly());
}
- if (other.hasClosestRowBefore()) {
- setClosestRowBefore(other.getClosestRowBefore());
- }
if (other.hasConsistency()) {
setConsistency(other.getConsistency());
}
@@ -4029,66 +3950,13 @@ public final class ClientProtos {
return this;
}
- // optional bool closest_row_before = 11 [default = false];
- private boolean closestRowBefore_ ;
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- public boolean hasClosestRowBefore() {
- return ((bitField0_ & 0x00000400) == 0x00000400);
- }
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- public boolean getClosestRowBefore() {
- return closestRowBefore_;
- }
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- public Builder setClosestRowBefore(boolean value) {
- bitField0_ |= 0x00000400;
- closestRowBefore_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bool closest_row_before = 11 [default = false];</code>
- *
- * <pre>
- * If the row to get doesn't exist, return the
- * closest row before.
- * </pre>
- */
- public Builder clearClosestRowBefore() {
- bitField0_ = (bitField0_ & ~0x00000400);
- closestRowBefore_ = false;
- onChanged();
- return this;
- }
-
// optional .hbase.pb.Consistency consistency = 12 [default = STRONG];
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
/**
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
*/
public boolean hasConsistency() {
- return ((bitField0_ & 0x00000800) == 0x00000800);
+ return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
@@ -4103,7 +3971,7 @@ public final class ClientProtos {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000800;
+ bitField0_ |= 0x00000400;
consistency_ = value;
onChanged();
return this;
@@ -4112,7 +3980,7 @@ public final class ClientProtos {
* <code>optional .hbase.pb.Consistency consistency = 12 [default = STRONG];</code>
*/
public Builder clearConsistency() {
- bitField0_ = (bitField0_ & ~0x00000800);
+ bitField0_ = (bitField0_ & ~0x00000400);
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
onChanged();
return this;
@@ -33253,135 +33121,134 @@ public final class ClientProtos {
"o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" +
"label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" +
"ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" +
- "ualifier\030\002 \003(\014\"\201\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
+ "ualifier\030\002 \003(\014\"\336\002\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
"olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" +
"te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" +
"ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" +
"e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers",
"ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" +
"e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" +
- " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\022!\n\022c" +
- "losest_row_before\030\013 \001(\010:\005false\0222\n\013consis" +
- "tency\030\014 \001(\0162\025.hbase.pb.Consistency:\006STRO" +
- "NG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.C" +
- "ell\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006ex" +
- "ists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007part" +
- "ial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region" +
- "\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get",
- "\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006" +
- "result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondi" +
- "tion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqua" +
- "lifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hba" +
- "se.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024." +
- "hbase.pb.Comparator\"\364\006\n\rMutationProto\022\013\n" +
- "\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase." +
- "pb.MutationProto.MutationType\0229\n\014column_" +
- "value\030\003 \003(\0132#.hbase.pb.MutationProto.Col" +
- "umnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute",
- "\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurab" +
- "ility\030\006 \001(\0162\".hbase.pb.MutationProto.Dur" +
- "ability:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\013" +
- "2\023.hbase.pb.TimeRange\022\035\n\025associated_cell" +
- "_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013ColumnVa" +
- "lue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value\030\002" +
- " \003(\01322.hbase.pb.MutationProto.ColumnValu" +
- "e.QualifierValue\032\214\001\n\016QualifierValue\022\021\n\tq" +
- "ualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimesta" +
- "mp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase.pb",
- ".MutationProto.DeleteType\022\014\n\004tags\030\005 \001(\014\"" +
- "W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_W" +
- "AL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSY" +
- "NC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020\000\022\r\n\t" +
- "INCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDele" +
- "teType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030DELETE" +
- "_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022" +
- "\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMutateReq" +
- "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
- "ecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.pb.Mut",
- "ationProto\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" +
- ".Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016Mutat" +
- "eResponse\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Res" +
- "ult\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006colum" +
- "n\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattribute\030\002" +
- " \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tstart_r" +
- "ow\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filter\030\005 \001" +
- "(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030\006 \001(\013" +
- "2\023.hbase.pb.TimeRange\022\027\n\014max_versions\030\007 " +
- "\001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nba",
- "tch_size\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022" +
- "\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001" +
- "(\r\022&\n\036load_column_families_on_demand\030\r \001" +
- "(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005fal" +
- "se\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb.Consi" +
- "stency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n\013Scan" +
- "Request\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Regio" +
- "nSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan" +
- "\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004" +
- " \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_call",
- "_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030\007 " +
- "\001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010\022\032" +
- "\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014ScanRespo" +
- "nse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner" +
- "_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004" +
- " \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022" +
- "\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_resul" +
- "t\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022" +
- "\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_metri" +
- "cs\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n\024Bulk",
- "LoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hbase" +
- ".pb.RegionSpecifier\022>\n\013family_path\030\002 \003(\013" +
- "2).hbase.pb.BulkLoadHFileRequest.FamilyP" +
- "ath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFamilyPat" +
- "h\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkL" +
- "oadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n\026Copr" +
- "ocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014servi" +
- "ce_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007re" +
- "quest\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022" +
- "&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"",
- "v\n\031CoprocessorServiceRequest\022)\n\006region\030\001" +
- " \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030" +
- "\002 \002(\0132 .hbase.pb.CoprocessorServiceCall\"" +
- "o\n\032CoprocessorServiceResponse\022)\n\006region\030" +
- "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005valu" +
- "e\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Act" +
- "ion\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.h" +
- "base.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hba" +
- "se.pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase." +
- "pb.CoprocessorServiceCall\"k\n\014RegionActio",
- "n\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpeci" +
- "fier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.h" +
- "base.pb.Action\"D\n\017RegionLoadStats\022\027\n\014mem" +
- "storeLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(" +
- "\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(" +
- "\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\te" +
- "xception\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022" +
- ":\n\016service_result\030\004 \001(\0132\".hbase.pb.Copro" +
- "cessorServiceResult\022,\n\tloadStats\030\005 \001(\0132\031" +
- ".hbase.pb.RegionLoadStats\"x\n\022RegionActio",
- "nResult\0226\n\021resultOrException\030\001 \003(\0132\033.hba" +
- "se.pb.ResultOrException\022*\n\texception\030\002 \001" +
- "(\0132\027.hbase.pb.NameBytesPair\"x\n\014MultiRequ" +
- "est\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Reg" +
- "ionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tconditi" +
- "on\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRe" +
- "sponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hba" +
- "se.pb.RegionActionResult\022\021\n\tprocessed\030\002 " +
- "\001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" +
- "NE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb",
- ".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" +
- "tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" +
- ".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" +
- "equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" +
- "adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" +
- "\037.hbase.pb.BulkLoadHFileResponse\022X\n\013Exec" +
- "Service\022#.hbase.pb.CoprocessorServiceReq" +
- "uest\032$.hbase.pb.CoprocessorServiceRespon" +
- "se\022d\n\027ExecRegionServerService\022#.hbase.pb" +
- ".CoprocessorServiceRequest\032$.hbase.pb.Co",
- "processorServiceResponse\0228\n\005Multi\022\026.hbas" +
- "e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" +
- "seBB\n*org.apache.hadoop.hbase.protobuf.g" +
- "eneratedB\014ClientProtosH\001\210\001\001\240\001\001"
+ " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" +
+ "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" +
+ "\006STRONG\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase" +
+ ".pb.Cell\022\035\n\025associated_cell_count\030\002 \001(\005\022" +
+ "\016\n\006exists\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n" +
+ "\007partial\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006r" +
+ "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032" +
+ "\n\003get\030\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetRespons",
+ "e\022 \n\006result\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\t" +
+ "Condition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021" +
+ "\n\tqualifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162" +
+ "\025.hbase.pb.CompareType\022(\n\ncomparator\030\005 \002" +
+ "(\0132\024.hbase.pb.Comparator\"\364\006\n\rMutationPro" +
+ "to\022\013\n\003row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.h" +
+ "base.pb.MutationProto.MutationType\0229\n\014co" +
+ "lumn_value\030\003 \003(\0132#.hbase.pb.MutationProt" +
+ "o.ColumnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattr" +
+ "ibute\030\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\n",
+ "durability\030\006 \001(\0162\".hbase.pb.MutationProt" +
+ "o.Durability:\013USE_DEFAULT\022\'\n\ntime_range\030" +
+ "\007 \001(\0132\023.hbase.pb.TimeRange\022\035\n\025associated" +
+ "_cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\371\001\n\013Col" +
+ "umnValue\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_va" +
+ "lue\030\002 \003(\01322.hbase.pb.MutationProto.Colum" +
+ "nValue.QualifierValue\032\214\001\n\016QualifierValue" +
+ "\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\tti" +
+ "mestamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hba" +
+ "se.pb.MutationProto.DeleteType\022\014\n\004tags\030\005",
+ " \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010S" +
+ "KIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r" +
+ "\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006APPEND\020" +
+ "\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n" +
+ "\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000\022\034\n\030D" +
+ "ELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE_FAMI" +
+ "LY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\242\001\n\rMuta" +
+ "teRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Reg" +
+ "ionSpecifier\022)\n\010mutation\030\002 \002(\0132\027.hbase.p" +
+ "b.MutationProto\022&\n\tcondition\030\003 \001(\0132\023.hba",
+ "se.pb.Condition\022\023\n\013nonce_group\030\004 \001(\004\"E\n\016" +
+ "MutateResponse\022 \n\006result\030\001 \001(\0132\020.hbase.p" +
+ "b.Result\022\021\n\tprocessed\030\002 \001(\010\"\346\003\n\004Scan\022 \n\006" +
+ "column\030\001 \003(\0132\020.hbase.pb.Column\022*\n\tattrib" +
+ "ute\030\002 \003(\0132\027.hbase.pb.NameBytesPair\022\021\n\tst" +
+ "art_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022 \n\006filte" +
+ "r\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_range\030" +
+ "\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_versio" +
+ "ns\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022" +
+ "\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_result_size\030\n",
+ " \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014store_offse" +
+ "t\030\014 \001(\r\022&\n\036load_column_families_on_deman" +
+ "d\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010" +
+ ":\005false\0222\n\013consistency\030\020 \001(\0162\025.hbase.pb." +
+ "Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r\"\220\002\n" +
+ "\013ScanRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb." +
+ "RegionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb" +
+ ".Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_r" +
+ "ows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext" +
+ "_call_seq\030\006 \001(\004\022\037\n\027client_handles_partia",
+ "ls\030\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 " +
+ "\001(\010\022\032\n\022track_scan_metrics\030\t \001(\010\"\232\002\n\014Scan" +
+ "Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" +
+ "anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" +
+ "ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re" +
+ "sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" +
+ "result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" +
+ " \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" +
+ "metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" +
+ "\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.",
+ "hbase.pb.RegionSpecifier\022>\n\013family_path\030" +
+ "\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" +
+ "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" +
+ "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" +
+ "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" +
+ "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" +
+ "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" +
+ "\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" +
+ "sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" +
+ "Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg",
+ "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" +
+ "call\030\002 \002(\0132 .hbase.pb.CoprocessorService" +
+ "Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" +
+ "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" +
+ "\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001" +
+ "\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" +
+ "\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" +
+ "\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" +
+ "base.pb.CoprocessorServiceCall\"k\n\014Region" +
+ "Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region",
+ "Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" +
+ "\0132\020.hbase.pb.Action\"D\n\017RegionLoadStats\022\027" +
+ "\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" +
+ "\030\002 \001(\005:\0010\"\332\001\n\021ResultOrException\022\r\n\005index" +
+ "\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result" +
+ "\022*\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytes" +
+ "Pair\022:\n\016service_result\030\004 \001(\0132\".hbase.pb." +
+ "CoprocessorServiceResult\022,\n\tloadStats\030\005 " +
+ "\001(\0132\031.hbase.pb.RegionLoadStats\"x\n\022Region" +
+ "ActionResult\0226\n\021resultOrException\030\001 \003(\0132",
+ "\033.hbase.pb.ResultOrException\022*\n\texceptio" +
+ "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" +
+ "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" +
+ "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" +
+ "ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\\\n\rMu" +
+ "ltiResponse\0228\n\022regionActionResult\030\001 \003(\0132" +
+ "\034.hbase.pb.RegionActionResult\022\021\n\tprocess" +
+ "ed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010T" +
+ "IMELINE\020\0012\203\004\n\rClientService\0222\n\003Get\022\024.hba" +
+ "se.pb.GetRequest\032\025.hbase.pb.GetResponse\022",
+ ";\n\006Mutate\022\027.hbase.pb.MutateRequest\032\030.hba" +
+ "se.pb.MutateResponse\0225\n\004Scan\022\025.hbase.pb." +
+ "ScanRequest\032\026.hbase.pb.ScanResponse\022P\n\rB" +
+ "ulkLoadHFile\022\036.hbase.pb.BulkLoadHFileReq" +
+ "uest\032\037.hbase.pb.BulkLoadHFileResponse\022X\n" +
+ "\013ExecService\022#.hbase.pb.CoprocessorServi" +
+ "ceRequest\032$.hbase.pb.CoprocessorServiceR" +
+ "esponse\022d\n\027ExecRegionServerService\022#.hba" +
+ "se.pb.CoprocessorServiceRequest\032$.hbase." +
+ "pb.CoprocessorServiceResponse\0228\n\005Multi\022\026",
+ ".hbase.pb.MultiRequest\032\027.hbase.pb.MultiR" +
+ "esponseBB\n*org.apache.hadoop.hbase.proto" +
+ "buf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -33411,7 +33278,7 @@ public final class ClientProtos {
internal_static_hbase_pb_Get_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_Get_descriptor,
- new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "ClosestRowBefore", "Consistency", });
+ new java.lang.String[] { "Row", "Column", "Attribute", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "StoreLimit", "StoreOffset", "ExistenceOnly", "Consistency", });
internal_static_hbase_pb_Result_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hbase_pb_Result_fieldAccessorTable = new
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index 3390de7..101854d 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -64,8 +64,7 @@ enum Consistency {
/**
* The protocol buffer version of Get.
* Unless existence_only is specified, return all the requested data
- * for the row that matches exactly, or the one that immediately
- * precedes it if closest_row_before is specified.
+ * for the row that matches exactly.
*/
message Get {
required bytes row = 1;
@@ -82,10 +81,6 @@ message Get {
// the existence.
optional bool existence_only = 10 [default = false];
- // If the row to get doesn't exist, return the
- // closest row before.
- optional bool closest_row_before = 11 [default = false];
-
optional Consistency consistency = 12 [default = STRONG];
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 0f1a238..172b763 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -675,10 +675,6 @@ public class RemoteHTable implements Table {
return true;
}
- public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
- throw new IOException("getRowOrBefore not supported");
- }
-
@Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Put put) throws IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index 6713546..6ea743a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -115,20 +115,6 @@ public final class HTableWrapper implements Table {
}
}
- /**
- * @deprecated in 0.99 since setting clearBufferOnFail is deprecated.
- */
- @Deprecated
- public Result getRowOrBefore(byte[] row, byte[] family)
- throws IOException {
- Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row);
- Result startRowResult = null;
- try (ResultScanner resultScanner = this.table.getScanner(scan)) {
- startRowResult = resultScanner.next();
- }
- return startRowResult;
- }
-
public Result get(Get get) throws IOException {
return table.get(get);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index f47c9f4..2e6d514 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -19,14 +19,14 @@ import java.io.IOException;
import java.util.List;
import java.util.NavigableSet;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@@ -54,9 +54,9 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALKey;
import com.google.common.collect.ImmutableList;
@@ -120,7 +120,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
@Override
public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
}
-
+
@Override
public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c,
byte[] splitRow) throws IOException {
@@ -130,22 +130,22 @@ public abstract class BaseRegionObserver implements RegionObserver {
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
byte[] splitKey, List<Mutation> metaEntries) throws IOException {
}
-
+
@Override
public void preSplitAfterPONR(
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
}
-
+
@Override
public void preRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
}
-
+
@Override
public void postRollBackSplit(
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
}
-
+
@Override
public void postCompleteSplit(
ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
@@ -219,18 +219,6 @@ public abstract class BaseRegionObserver implements RegionObserver {
}
@Override
- public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
- final byte [] row, final byte [] family, final Result result)
- throws IOException {
- }
-
- @Override
- public void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> e,
- final byte [] row, final byte [] family, final Result result)
- throws IOException {
- }
-
- @Override
public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
final Get get, final List<Cell> results) throws IOException {
}
@@ -253,12 +241,12 @@ public abstract class BaseRegionObserver implements RegionObserver {
}
@Override
- public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
+ public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
final Put put, final WALEdit edit, final Durability durability) throws IOException {
}
@Override
- public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e,
+ public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e,
final Put put, final WALEdit edit, final Durability durability) throws IOException {
}
@@ -278,7 +266,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
final Delete delete, final WALEdit edit, final Durability durability)
throws IOException {
}
-
+
@Override
public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index fd19ede..e87a590 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -382,7 +382,7 @@ public interface RegionObserver extends Coprocessor {
void preSplitBeforePONR(final ObserverContext<RegionCoprocessorEnvironment> ctx,
byte[] splitKey, List<Mutation> metaEntries) throws IOException;
-
+
/**
* This will be called after PONR step as part of split transaction
* Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
@@ -391,9 +391,9 @@ public interface RegionObserver extends Coprocessor {
* @throws IOException
*/
void preSplitAfterPONR(final ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException;
-
+
/**
- * This will be called before the roll back of the split region is completed
+ * This will be called before the roll back of the split region is completed
* @param ctx
* @throws IOException
*/
@@ -419,7 +419,7 @@ public interface RegionObserver extends Coprocessor {
* Called before the region is reported as closed to the master.
* @param c the environment provided by the region server
* @param abortRequested true if the region server is aborting
- * @throws IOException
+ * @throws IOException
*/
void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
boolean abortRequested) throws IOException;
@@ -433,40 +433,6 @@ public interface RegionObserver extends Coprocessor {
boolean abortRequested);
/**
- * Called before a client makes a GetClosestRowBefore request.
- * <p>
- * Call CoprocessorEnvironment#bypass to skip default actions
- * <p>
- * Call CoprocessorEnvironment#complete to skip any subsequent chained
- * coprocessors
- * @param c the environment provided by the region server
- * @param row the row
- * @param family the family
- * @param result The result to return to the client if default processing
- * is bypassed. Can be modified. Will not be used if default processing
- * is not bypassed.
- * @throws IOException if an error occurred on the coprocessor
- */
- void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
- final byte [] row, final byte [] family, final Result result)
- throws IOException;
-
- /**
- * Called after a client makes a GetClosestRowBefore request.
- * <p>
- * Call CoprocessorEnvironment#complete to skip any subsequent chained
- * coprocessors
- * @param c the environment provided by the region server
- * @param row the row
- * @param family the desired family
- * @param result the result to return to the client, modify as necessary
- * @throws IOException if an error occurred on the coprocessor
- */
- void postGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
- final byte [] row, final byte [] family, final Result result)
- throws IOException;
-
- /**
* Called before the client performs a Get
* <p>
* Call CoprocessorEnvironment#bypass to skip default actions
@@ -543,7 +509,7 @@ public interface RegionObserver extends Coprocessor {
* @param durability Persistence guarantee for this Put
* @throws IOException if an error occurred on the coprocessor
*/
- void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
+ void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability)
throws IOException;
@@ -558,7 +524,7 @@ public interface RegionObserver extends Coprocessor {
* @param durability Persistence guarantee for this Put
* @throws IOException if an error occurred on the coprocessor
*/
- void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
+ void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability)
throws IOException;
@@ -575,7 +541,7 @@ public interface RegionObserver extends Coprocessor {
* @param durability Persistence guarantee for this Delete
* @throws IOException if an error occurred on the coprocessor
*/
- void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
+ void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability)
throws IOException;
/**
@@ -611,7 +577,7 @@ public interface RegionObserver extends Coprocessor {
void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability)
throws IOException;
-
+
/**
* This will be called for every batch mutation operation happening at the server. This will be
* called after acquiring the locks on the mutating rows and after applying the proper timestamp
@@ -658,7 +624,7 @@ public interface RegionObserver extends Coprocessor {
* Called after the completion of batch put/delete and will be called even if the batch operation
* fails
* @param ctx
- * @param miniBatchOp
+ * @param miniBatchOp
* @param success true if batch operation is successful otherwise false.
* @throws IOException
*/
@@ -679,7 +645,7 @@ public interface RegionObserver extends Coprocessor {
* @param compareOp the comparison operation
* @param comparator the comparator
* @param put data to put if check succeeds
- * @param result
+ * @param result
* @return the return value to return to client if bypassing default
* processing
* @throws IOException if an error occurred on the coprocessor
@@ -693,8 +659,8 @@ public interface RegionObserver extends Coprocessor {
/**
* Called before checkAndPut but after acquiring rowlock.
* <p>
- * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
- * Row will be locked for longer time. Trying to acquire lock on another row, within this,
+ * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
+ * Row will be locked for longer time. Trying to acquire lock on another row, within this,
* can lead to potential deadlock.
* <p>
* Call CoprocessorEnvironment#bypass to skip default actions
@@ -708,14 +674,14 @@ public interface RegionObserver extends Coprocessor {
* @param compareOp the comparison operation
* @param comparator the comparator
* @param put data to put if check succeeds
- * @param result
+ * @param result
* @return the return value to return to client if bypassing default
* processing
* @throws IOException if an error occurred on the coprocessor
*/
boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier, final CompareOp compareOp,
- final ByteArrayComparable comparator, final Put put,
+ final ByteArrayComparable comparator, final Put put,
final boolean result) throws IOException;
/**
@@ -754,7 +720,7 @@ public interface RegionObserver extends Coprocessor {
* @param compareOp the comparison operation
* @param comparator the comparator
* @param delete delete to commit if check succeeds
- * @param result
+ * @param result
* @return the value to return to client if bypassing default processing
* @throws IOException if an error occurred on the coprocessor
*/
@@ -767,8 +733,8 @@ public interface RegionObserver extends Coprocessor {
/**
* Called before checkAndDelete but after acquiring rowock.
* <p>
- * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
- * Row will be locked for longer time. Trying to acquire lock on another row, within this,
+ * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
+ * Row will be locked for longer time. Trying to acquire lock on another row, within this,
* can lead to potential deadlock.
* <p>
* Call CoprocessorEnvironment#bypass to skip default actions
@@ -782,7 +748,7 @@ public interface RegionObserver extends Coprocessor {
* @param compareOp the comparison operation
* @param comparator the comparator
* @param delete delete to commit if check succeeds
- * @param result
+ * @param result
* @return the value to return to client if bypassing default processing
* @throws IOException if an error occurred on the coprocessor
*/
@@ -877,8 +843,8 @@ public interface RegionObserver extends Coprocessor {
/**
* Called before Append but after acquiring rowlock.
* <p>
- * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
- * Row will be locked for longer time. Trying to acquire lock on another row, within this,
+ * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
+ * Row will be locked for longer time. Trying to acquire lock on another row, within this,
* can lead to potential deadlock.
* <p>
* Call CoprocessorEnvironment#bypass to skip default actions
@@ -927,14 +893,14 @@ public interface RegionObserver extends Coprocessor {
/**
* Called before Increment but after acquiring rowlock.
* <p>
- * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
- * Row will be locked for longer time. Trying to acquire lock on another row, within this,
+ * <b>Note:</b> Caution to be taken for not doing any long time operation in this hook.
+ * Row will be locked for longer time. Trying to acquire lock on another row, within this,
* can lead to potential deadlock.
* <p>
* Call CoprocessorEnvironment#bypass to skip default actions
* <p>
* Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors
- *
+ *
* @param c
* the environment provided by the region server
* @param increment
@@ -1227,7 +1193,7 @@ public interface RegionObserver extends Coprocessor {
* Called before creation of Reader for a store file.
* Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
* effect in this hook.
- *
+ *
* @param ctx the environment provided by the region server
* @param fs fileystem to read from
* @param p path to the file
@@ -1246,7 +1212,7 @@ public interface RegionObserver extends Coprocessor {
/**
* Called after the creation of Reader for a store file.
- *
+ *
* @param ctx the environment provided by the region server
* @param fs fileystem to read from
* @param p path to the file
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 161e4b6..cc8c3a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -31,15 +31,14 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;
@@ -385,85 +384,6 @@ public class DefaultMemStore implements MemStore {
}
/**
- * @param state column/delete tracking state
- */
- @Override
- public void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state) {
- getRowKeyAtOrBefore(cellSet, state);
- getRowKeyAtOrBefore(snapshot, state);
- }
-
- /*
- * @param set
- * @param state Accumulates deletes and candidates.
- */
- private void getRowKeyAtOrBefore(final NavigableSet<Cell> set,
- final GetClosestRowBeforeTracker state) {
- if (set.isEmpty()) {
- return;
- }
- if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) {
- // Found nothing in row. Try backing up.
- getRowKeyBefore(set, state);
- }
- }
-
- /*
- * Walk forward in a row from <code>firstOnRow</code>. Presumption is that
- * we have been passed the first possible key on a row. As we walk forward
- * we accumulate deletes until we hit a candidate on the row at which point
- * we return.
- * @param set
- * @param firstOnRow First possible key on this row.
- * @param state
- * @return True if we found a candidate walking this row.
- */
- private boolean walkForwardInSingleRow(final SortedSet<Cell> set,
- final Cell firstOnRow, final GetClosestRowBeforeTracker state) {
- boolean foundCandidate = false;
- SortedSet<Cell> tail = set.tailSet(firstOnRow);
- if (tail.isEmpty()) return foundCandidate;
- for (Iterator<Cell> i = tail.iterator(); i.hasNext();) {
- Cell kv = i.next();
- // Did we go beyond the target row? If so break.
- if (state.isTooFar(kv, firstOnRow)) break;
- if (state.isExpired(kv)) {
- i.remove();
- continue;
- }
- // If we added something, this row is a contender. break.
- if (state.handle(kv)) {
- foundCandidate = true;
- break;
- }
- }
- return foundCandidate;
- }
-
- /*
- * Walk backwards through the passed set a row at a time until we run out of
- * set or until we get a candidate.
- * @param set
- * @param state
- */
- private void getRowKeyBefore(NavigableSet<Cell> set,
- final GetClosestRowBeforeTracker state) {
- Cell firstOnRow = state.getTargetKey();
- for (Member p = memberOfPreviousRow(set, state, firstOnRow);
- p != null; p = memberOfPreviousRow(p.set, state, firstOnRow)) {
- // Make sure we don't fall out of our table.
- if (!state.isTargetTable(p.cell)) break;
- // Stop looking if we've exited the better candidate range.
- if (!state.isBetterCandidate(p.cell)) break;
- // Make into firstOnRow
- firstOnRow = new KeyValue(p.cell.getRowArray(), p.cell.getRowOffset(), p.cell.getRowLength(),
- HConstants.LATEST_TIMESTAMP);
- // If we find something, break;
- if (walkForwardInSingleRow(p.set, firstOnRow, state)) break;
- }
- }
-
- /**
* Only used by tests. TODO: Remove
*
* Given the specs of a column, update it, first by inserting a new record,
@@ -622,42 +542,6 @@ public class DefaultMemStore implements MemStore {
return addedSize;
}
- /*
- * Immutable data structure to hold member found in set and the set it was
- * found in. Include set because it is carrying context.
- */
- private static class Member {
- final Cell cell;
- final NavigableSet<Cell> set;
- Member(final NavigableSet<Cell> s, final Cell kv) {
- this.cell = kv;
- this.set = s;
- }
- }
-
- /*
- * @param set Set to walk back in. Pass a first in row or we'll return
- * same row (loop).
- * @param state Utility and context.
- * @param firstOnRow First item on the row after the one we want to find a
- * member in.
- * @return Null or member of row previous to <code>firstOnRow</code>
- */
- private Member memberOfPreviousRow(NavigableSet<Cell> set,
- final GetClosestRowBeforeTracker state, final Cell firstOnRow) {
- NavigableSet<Cell> head = set.headSet(firstOnRow, false);
- if (head.isEmpty()) return null;
- for (Iterator<Cell> i = head.descendingIterator(); i.hasNext();) {
- Cell found = i.next();
- if (state.isExpired(found)) {
- i.remove();
- continue;
- }
- return new Member(head, found);
- }
- return null;
- }
-
/**
* @return scanner on memstore and snapshot in this order.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
deleted file mode 100644
index 2df4286..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.util.NavigableMap;
-import java.util.NavigableSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}.
- * Like {@link ScanQueryMatcher} and {@link ScanDeleteTracker} but does not
- * implement the {@link DeleteTracker} interface since state spans rows (There
- * is no update nor reset method).
- */
-@InterfaceAudience.Private
-class GetClosestRowBeforeTracker {
- private final KeyValue targetkey;
- // Any cell w/ a ts older than this is expired.
- private final long now;
- private final long oldestUnexpiredTs;
- private Cell candidate = null;
- private final CellComparator cellComparator;
- // Flag for whether we're doing getclosest on a metaregion.
- private final boolean metaregion;
- // Offset and length into targetkey demarking table name (if in a metaregion).
- private final int rowoffset;
- private final int tablenamePlusDelimiterLength;
-
- // Deletes keyed by row. Comparator compares on row portion of KeyValue only.
- private final NavigableMap<Cell, NavigableSet<Cell>> deletes;
-
- /**
- * @param c
- * @param kv Presume first on row: i.e. empty column, maximum timestamp and
- * a type of Type.Maximum
- * @param ttl Time to live in ms for this Store
- * @param metaregion True if this is hbase:meta or -ROOT- region.
- */
- GetClosestRowBeforeTracker(final CellComparator c, final KeyValue kv,
- final long ttl, final boolean metaregion) {
- super();
- this.metaregion = metaregion;
- this.targetkey = kv;
- // If we are in a metaregion, then our table name is the prefix on the
- // targetkey.
- this.rowoffset = kv.getRowOffset();
- int l = -1;
- if (metaregion) {
- l = Bytes.searchDelimiterIndex(kv.getRowArray(), rowoffset, kv.getRowLength(),
- HConstants.DELIMITER) - this.rowoffset;
- }
- this.tablenamePlusDelimiterLength = metaregion? l + 1: -1;
- this.now = System.currentTimeMillis();
- this.oldestUnexpiredTs = now - ttl;
- this.cellComparator = c;
- this.deletes = new TreeMap<Cell, NavigableSet<Cell>>(new CellComparator.RowComparator());
- }
-
- /*
- * Add the specified KeyValue to the list of deletes.
- * @param kv
- */
- private void addDelete(final Cell kv) {
- NavigableSet<Cell> rowdeletes = this.deletes.get(kv);
- if (rowdeletes == null) {
- rowdeletes = new TreeSet<Cell>(this.cellComparator);
- this.deletes.put(kv, rowdeletes);
- }
- rowdeletes.add(kv);
- }
-
- /*
- * @param kv Adds candidate if nearer the target than previous candidate.
- * @return True if updated candidate.
- */
- private boolean addCandidate(final Cell kv) {
- if (!isDeleted(kv) && isBetterCandidate(kv)) {
- this.candidate = kv;
- return true;
- }
- return false;
- }
-
- boolean isBetterCandidate(final Cell contender) {
- return this.candidate == null ||
- (this.cellComparator.compareRows(this.candidate, contender) < 0 &&
- this.cellComparator.compareRows(contender, this.targetkey) <= 0);
- }
-
- /*
- * Check if specified KeyValue buffer has been deleted by a previously
- * seen delete.
- * @param kv
- * @return true is the specified KeyValue is deleted, false if not
- */
- private boolean isDeleted(final Cell kv) {
- if (this.deletes.isEmpty()) return false;
- NavigableSet<Cell> rowdeletes = this.deletes.get(kv);
- if (rowdeletes == null || rowdeletes.isEmpty()) return false;
- return isDeleted(kv, rowdeletes);
- }
-
- /**
- * Check if the specified KeyValue buffer has been deleted by a previously
- * seen delete.
- * @param kv
- * @param ds
- * @return True is the specified KeyValue is deleted, false if not
- */
- public boolean isDeleted(final Cell kv, final NavigableSet<Cell> ds) {
- if (deletes == null || deletes.isEmpty()) return false;
- for (Cell d: ds) {
- long kvts = kv.getTimestamp();
- long dts = d.getTimestamp();
- if (CellUtil.isDeleteFamily(d)) {
- if (kvts <= dts) return true;
- continue;
- }
- // Check column
- int ret = CellComparator.compareQualifiers(kv, d);
- if (ret <= -1) {
- // This delete is for an earlier column.
- continue;
- } else if (ret >= 1) {
- // Beyond this kv.
- break;
- }
- // Check Timestamp
- if (kvts > dts) return false;
-
- // Check Type
- switch (KeyValue.Type.codeToType(d.getTypeByte())) {
- case Delete: return kvts == dts;
- case DeleteColumn: return true;
- default: continue;
- }
- }
- return false;
- }
-
- /**
- * @param cell
- * @return true if the cell is expired
- */
- public boolean isExpired(final Cell cell) {
- return cell.getTimestamp() < this.oldestUnexpiredTs ||
- HStore.isCellTTLExpired(cell, this.oldestUnexpiredTs, this.now);
- }
-
- /*
- * Handle keys whose values hold deletes.
- * Add to the set of deletes and then if the candidate keys contain any that
- * might match, then check for a match and remove it. Implies candidates
- * is made with a Comparator that ignores key type.
- * @param kv
- * @return True if we removed <code>k</code> from <code>candidates</code>.
- */
- boolean handleDeletes(final Cell kv) {
- addDelete(kv);
- boolean deleted = false;
- if (!hasCandidate()) return deleted;
- if (isDeleted(this.candidate)) {
- this.candidate = null;
- deleted = true;
- }
- return deleted;
- }
-
- /**
- * Do right thing with passed key, add to deletes or add to candidates.
- * @param kv
- * @return True if we added a candidate
- */
- boolean handle(final Cell kv) {
- if (CellUtil.isDelete(kv)) {
- handleDeletes(kv);
- return false;
- }
- return addCandidate(kv);
- }
-
- /**
- * @return True if has candidate
- */
- public boolean hasCandidate() {
- return this.candidate != null;
- }
-
- /**
- * @return Best candidate or null.
- */
- public Cell getCandidate() {
- return this.candidate;
- }
-
- public KeyValue getTargetKey() {
- return this.targetkey;
- }
-
- /**
- * @param kv Current kv
- * @param firstOnRow on row kv.
- * @return True if we went too far, past the target key.
- */
- boolean isTooFar(final Cell kv, final Cell firstOnRow) {
- return this.cellComparator.compareRows(kv, firstOnRow) > 0;
- }
-
- boolean isTargetTable(final Cell kv) {
- if (!metaregion) return true;
- // Compare start of keys row. Compare including delimiter. Saves having
- // to calculate where tablename ends in the candidate kv.
- return Bytes.compareTo(this.targetkey.getRowArray(), this.rowoffset,
- this.tablenamePlusDelimiterLength,
- kv.getRowArray(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d4e89e0..af4271a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2431,38 +2431,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
//////////////////////////////////////////////////////////////////////////////
@Override
- public Result getClosestRowBefore(final byte [] row, final byte [] family) throws IOException {
- if (coprocessorHost != null) {
- Result result = new Result();
- if (coprocessorHost.preGetClosestRowBefore(row, family, result)) {
- return result;
- }
- }
- // look across all the HStores for this region and determine what the
- // closest key is across all column families, since the data may be sparse
- checkRow(row, "getClosestRowBefore");
- startRegionOperation(Operation.GET);
- this.readRequestsCount.increment();
- try {
- Store store = getStore(family);
- // get the closest key. (HStore.getRowKeyAtOrBefore can return null)
- Cell key = store.getRowKeyAtOrBefore(row);
- Result result = null;
- if (key != null) {
- Get get = new Get(CellUtil.cloneRow(key));
- get.addFamily(family);
- result = get(get);
- }
- if (coprocessorHost != null) {
- coprocessorHost.postGetClosestRowBefore(row, family, result);
- }
- return result;
- } finally {
- closeRegionOperation(Operation.GET);
- }
- }
-
- @Override
public RegionScanner getScanner(Scan scan) throws IOException {
return getScanner(scan, null);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6db268/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 57ca3f1..07d51c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1775,154 +1775,6 @@ public class HStore implements Store {
}
@Override
- public Cell getRowKeyAtOrBefore(final byte[] row) throws IOException {
- // If minVersions is set, we will not ignore expired KVs.
- // As we're only looking for the latest matches, that should be OK.
- // With minVersions > 0 we guarantee that any KV that has any version
- // at all (expired or not) has at least one version that will not expire.
- // Note that this method used to take a KeyValue as arguments. KeyValue
- // can be back-dated, a row key cannot.
- long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl();
-
- KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP);
-
- GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker(
- this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion());
- this.lock.readLock().lock();
- try {
- // First go to the memstore. Pick up deletes and candidates.
- this.memstore.getRowKeyAtOrBefore(state);
- // Check if match, if we got a candidate on the asked for 'kv' row.
- // Process each relevant store file. Run through from newest to oldest.
- Iterator<StoreFile> sfIterator = this.storeEngine.getStoreFileManager()
- .getCandidateFilesForRowKeyBefore(state.getTargetKey());
- while (sfIterator.hasNext()) {
- StoreFile sf = sfIterator.next();
- sfIterator.remove(); // Remove sf from iterator.
- boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state);
- Cell candidate = state.getCandidate();
- // we have an optimization here which stops the search if we find exact match.
- if (candidate != null && CellUtil.matchingRow(candidate, row)) {
- return candidate;
- }
- if (haveNewCandidate) {
- sfIterator = this.storeEngine.getStoreFileManager().updateCandidateFilesForRowKeyBefore(
- sfIterator, state.getTargetKey(), candidate);
- }
- }
- return state.getCandidate();
- } finally {
- this.lock.readLock().unlock();
- }
- }
-
- /*
- * Check an individual MapFile for the row at or before a given row.
- * @param f
- * @param state
- * @throws IOException
- * @return True iff the candidate has been updated in the state.
- */
- private boolean rowAtOrBeforeFromStoreFile(final StoreFile f,
- final GetClosestRowBeforeTracker state)
- throws IOException {
- StoreFile.Reader r = f.getReader();
- if (r == null) {
- LOG.warn("StoreFile " + f + " has a null Reader");
- return false;
- }
- if (r.getEntries() == 0) {
- LOG.warn("StoreFile " + f + " is a empty store file");
- return false;
- }
- // TODO: Cache these keys rather than make each time?
- Cell firstKV = r.getFirstKey();
- if (firstKV == null) return false;
- Cell lastKV = r.getLastKey();
- Cell firstOnRow = state.getTargetKey();
- if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
- // If last key in file is not of the target table, no candidates in this
- // file. Return.
- if (!state.isTargetTable(lastKV)) return false;
- // If the row we're looking for is past the end of file, set search key to
- // last key. TODO: Cache last and first key rather than make each time.
- firstOnRow = CellUtil.createFirstOnRow(lastKV);
- }
- // Get a scanner that caches blocks and that uses pread.
- HFileScanner scanner = r.getScanner(true, true, false);
- // Seek scanner. If can't seek it, return.
- if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
- // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
- // Unlikely that there'll be an instance of actual first row in table.
- if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
- // If here, need to start backing up.
- while (scanner.seekBefore(firstOnRow)) {
- Cell kv = scanner.getCell();
- if (!state.isTargetTable(kv)) break;
- if (!state.isBetterCandidate(kv)) break;
- // Make new first on row.
- firstOnRow = CellUtil.createFirstOnRow(kv);
- // Seek scanner. If can't seek it, break.
- if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
- // If we find something, break;
- if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
- }
- return false;
- }
-
- /*
- * Seek the file scanner to firstOnRow or first entry in file.
- * @param scanner
- * @param firstOnRow
- * @param firstKV
- * @return True if we successfully seeked scanner.
- * @throws IOException
- */
- private boolean seekToScanner(final HFileScanner scanner,
- final Cell firstOnRow,
- final Cell firstKV)
- throws IOException {
- Cell kv = firstOnRow;
- // If firstOnRow < firstKV, set to firstKV
- if (this.comparator.compareRows(firstKV, firstOnRow) == 0) kv = firstKV;
- int result = scanner.seekTo(kv);
- return result != -1;
- }
-
- /*
- * When we come in here, we are probably at the kv just before we break into
- * the row that firstOnRow is on. Usually need to increment one time to get
- * on to the row we are interested in.
- * @param scanner
- * @param firstOnRow
- * @param state
- * @return True we found a candidate.
- * @throws IOException
- */
- private boolean walkForwardInSingleRow(final HFileScanner scanner,
- final Cell firstOnRow,
- final GetClosestRowBeforeTracker state)
- throws IOException {
- boolean foundCandidate = false;
- do {
- Cell kv = scanner.getCell();
- // If we are not in the row, skip.
- if (this.comparator.compareRows(kv, firstOnRow) < 0) continue;
- // Did we go beyond the target row? If so break.
- if (state.isTooFar(kv, firstOnRow)) break;
- if (state.isExpired(kv)) {
- continue;
- }
- // If we added something, this row is a contender. break.
- if (state.handle(kv)) {
- foundCandidate = true;
- break;
- }
- } while(scanner.next());
- return foundCandidate;
- }
-
- @Override
public boolean canSplit() {
this.lock.readLock().lock();
try {