Posted to commits@hbase.apache.org by ap...@apache.org on 2017/08/02 00:28:05 UTC
[1/3] hbase git commit: HBASE-18395 Update clock on region open and close (revision 5)
Repository: hbase
Updated Branches:
refs/heads/HBASE-14070.HLC 0f52f68de -> 386b1df1d
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 29c0576..edd609f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -426,7 +426,7 @@ public class TestMasterFailover {
log("Master has aborted");
rs.getRSRpcServices().closeRegion(null, ProtobufUtil.buildCloseRegionRequest(
- rs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()));
+ rs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO.getEncodedName(), null));
// Start up a new master
log("Starting up a new master");
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index fe0e7b1..9962c3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -216,7 +216,7 @@ public class TestMasterNoCluster {
// Fake a successful close.
Mockito.doReturn(true).when(spy).
sendRegionClose((ServerName)Mockito.any(), (HRegionInfo)Mockito.any(),
- (ServerName)Mockito.any());
+ (ServerName)Mockito.any(), (Long)Mockito.any());
return spy;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 71bd01b..aceff82 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -173,7 +173,7 @@ public class TestHRegionReplayEvents {
when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
when(rss.getConfiguration()).thenReturn(CONF);
when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(CONF));
- when(rss.getRegionServerClock((ClockType)any())).thenReturn(new Clock.System());
+ when(rss.getClock((ClockType)any())).thenReturn(new Clock.System());
String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER
.toString();
ExecutorService es = new ExecutorService(string);
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index 5cf351f..e2ec7ea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -153,7 +153,7 @@ public class TestRegionServerNoMaster {
public static void openRegion(HBaseTestingUtility HTU, HRegionServer rs, HRegionInfo hri)
throws Exception {
AdminProtos.OpenRegionRequest orr =
- RequestConverter.buildOpenRegionRequest(rs.getServerName(), hri, null, null);
+ RequestConverter.buildOpenRegionRequest(rs.getServerName(), hri, null, null, null);
AdminProtos.OpenRegionResponse responseOpen = rs.rpcServices.openRegion(null, orr);
Assert.assertTrue(responseOpen.getOpeningStateCount() == 1);
@@ -176,7 +176,7 @@ public class TestRegionServerNoMaster {
public static void closeRegion(HBaseTestingUtility HTU, HRegionServer rs, HRegionInfo hri)
throws Exception {
AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest(
- rs.getServerName(), hri.getEncodedName());
+ rs.getServerName(), hri.getEncodedName(), null);
AdminProtos.CloseRegionResponse responseClose = rs.rpcServices.closeRegion(null, crr);
Assert.assertTrue(responseClose.getClosed());
checkRegionIsClosed(HTU, rs, hri);
@@ -220,7 +220,7 @@ public class TestRegionServerNoMaster {
public void testMultipleCloseFromMaster() throws Exception {
for (int i = 0; i < 10; i++) {
AdminProtos.CloseRegionRequest crr =
- ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName, null);
+ ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName);
try {
AdminProtos.CloseRegionResponse responseClose = getRS().rpcServices.closeRegion(null, crr);
Assert.assertTrue("request " + i + " failed",
@@ -295,7 +295,7 @@ public class TestRegionServerNoMaster {
closeRegionNoZK();
try {
AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(
- earlierServerName, hri, null, null);
+ earlierServerName, hri, null, null, null);
getRS().getRSRpcServices().openRegion(null, orr);
Assert.fail("The openRegion should have been rejected");
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException se) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
index 777cf2e..9c33a9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
@@ -108,7 +108,7 @@ public class TestRegionSplitPolicy {
final List<Region> regions = new ArrayList<>();
Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions);
Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
- Mockito.when(rss.getRegionServerClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
+ Mockito.when(rss.getClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
// Set max size for this 'table'.
long maxSplitSize = 1024L;
htd.setMaxFileSize(maxSplitSize);
@@ -170,7 +170,7 @@ public class TestRegionSplitPolicy {
Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(0L);
Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(0L);
- Mockito.when(rss.getRegionServerClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
+ Mockito.when(rss.getClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
BusyRegionSplitPolicy policy =
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 80f8a4d..c866833 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -205,7 +205,7 @@ public class TestWALLockup {
Mockito.when(server.isStopped()).thenReturn(false);
Mockito.when(server.isAborted()).thenReturn(false);
RegionServerServices services = Mockito.mock(RegionServerServices.class);
- Mockito.when(services.getRegionServerClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
+ Mockito.when(services.getClock(ClockType.SYSTEM)).thenReturn(new Clock.System());
// OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL, go ahead with test.
FileSystem fs = FileSystem.get(CONF);
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index b403560..03f17cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -62,9 +62,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
@@ -689,7 +687,7 @@ public abstract class AbstractTestWALReplay {
WAL wal = createWAL(this.conf, hbaseRootDir, logName);
RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
Mockito.doReturn(false).when(rsServices).isAborted();
- when(rsServices.getRegionServerClock(clock.getClockType())).thenReturn(clock);
+ when(rsServices.getClock(clock.getClockType())).thenReturn(clock);
when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
Configuration customConf = new Configuration(this.conf);
customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
[3/3] hbase git commit: HBASE-18395 Update clock on region open and close (revision 5)
Posted by ap...@apache.org.
HBASE-18395 Update clock on region open and close (revision 5)
-Added a new protobuf message type (NodeTime) that carries a timestamp field
-Set the timestamp field when building region open/close request and response messages
-Updated the receiving node's clock upon receiving such a message
-Added updating of the clock upon region open with the highest timestamp from the region's store files (TODO: add test)
-Added explicit asserts on the expected logical time in the TestClockWithCluster integration tests
Change-Id: Idb5484750d832cb6e5e2f29bbf418870b84efc27
Signed-off-by: Apekshit Sharma <ap...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/386b1df1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/386b1df1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/386b1df1
Branch: refs/heads/HBASE-14070.HLC
Commit: 386b1df1dd9c44bdf5d60d9519c8179af2f75ddb
Parents: 0f52f68
Author: Amit Patel <ia...@gmail.com>
Authored: Thu Jun 15 13:30:06 2017 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Tue Aug 1 17:24:31 2017 -0700
----------------------------------------------------------------------
.../apache/hadoop/hbase/client/HBaseAdmin.java | 2 +-
.../hbase/shaded/protobuf/ProtobufUtil.java | 19 +-
.../hbase/shaded/protobuf/RequestConverter.java | 13 +-
.../java/org/apache/hadoop/hbase/Clock.java | 6 +
.../shaded/protobuf/generated/AdminProtos.java | 1353 ++++++++++++++++--
.../shaded/protobuf/generated/HBaseProtos.java | 507 ++++++-
.../src/main/protobuf/Admin.proto | 8 +
.../src/main/protobuf/HBase.proto | 8 +
.../hbase/protobuf/generated/ClientProtos.java | 2 +-
.../hbase/protobuf/generated/QuotaProtos.java | 40 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 21 +
.../hadoop/hbase/master/MasterServices.java | 14 +
.../hadoop/hbase/master/ServerManager.java | 14 +-
.../master/procedure/RSProcedureDispatcher.java | 46 +-
.../hadoop/hbase/regionserver/HRegion.java | 9 +-
.../hbase/regionserver/HRegionServer.java | 53 +-
.../hadoop/hbase/regionserver/HStore.java | 5 +-
.../hbase/regionserver/RSRpcServices.java | 35 +-
.../regionserver/RegionServerServices.java | 12 +-
.../hadoop/hbase/regionserver/StoreUtils.java | 8 +
.../hadoop/hbase/MockRegionServerServices.java | 10 +-
.../hadoop/hbase/TestClockWithCluster.java | 293 +++-
.../hadoop/hbase/client/TestReplicasClient.java | 4 +-
.../coprocessor/TestIncrementTimeRange.java | 2 +
.../hbase/master/MockNoopMasterServices.java | 8 +
.../hadoop/hbase/master/MockRegionServer.java | 11 +-
.../hadoop/hbase/master/TestMasterFailover.java | 2 +-
.../hbase/master/TestMasterNoCluster.java | 2 +-
.../regionserver/TestHRegionReplayEvents.java | 2 +-
.../regionserver/TestRegionServerNoMaster.java | 8 +-
.../regionserver/TestRegionSplitPolicy.java | 4 +-
.../hbase/regionserver/TestWALLockup.java | 2 +-
.../regionserver/wal/AbstractTestWALReplay.java | 4 +-
33 files changed, 2264 insertions(+), 263 deletions(-)
----------------------------------------------------------------------
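To make the round trip concrete, here is a minimal sketch of how the new NodeTime
field is set and read, using only the generated API that appears in this patch.
The timestamp value is illustrative, the open_info entries are omitted for brevity,
and folding the received time into the local clock is left out (that logic lives in
the region server changes below).

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

public class NodeTimeRoundTrip {
  public static void main(String[] args) {
    long masterClockTime = 1501633471000L; // illustrative physical/hybrid timestamp

    // Master side: attach the clock reading to the open-region request.
    OpenRegionRequest request = OpenRegionRequest.newBuilder()
        .setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(masterClockTime))
        .build();

    // Region server side: the field is optional, so check before reading.
    if (request.hasNodeTime()) {
      long remoteTime = request.getNodeTime().getTime();
      System.out.println("master clock time = " + remoteTime);
    }
  }
}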
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index fb9df62..c168c9c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1181,7 +1181,7 @@ public class HBaseAdmin implements Admin {
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
// Close the region without updating zk state.
CloseRegionRequest request =
- ProtobufUtil.buildCloseRegionRequest(sn, encodedRegionName);
+ ProtobufUtil.buildCloseRegionRequest(sn, encodedRegionName, null);
// TODO: There is no timeout on this controller. Set one!
HBaseRpcController controller = this.rpcControllerFactory.newController();
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index b1b52b1..4ebba53 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -1860,10 +1860,10 @@ public final class ProtobufUtil {
public static boolean closeRegion(final RpcController controller,
final AdminService.BlockingInterface admin,
final ServerName server, final byte[] regionName,
- final ServerName destinationServer) throws IOException {
+ final ServerName destinationServer, Long masterClockTime) throws IOException {
CloseRegionRequest closeRegionRequest =
ProtobufUtil.buildCloseRegionRequest(server,
- regionName, destinationServer);
+ regionName, destinationServer, masterClockTime);
try {
CloseRegionResponse response = admin.closeRegion(controller, closeRegionRequest);
return ResponseConverter.isClosed(response);
@@ -1903,7 +1903,7 @@ public final class ProtobufUtil {
final AdminService.BlockingInterface admin, ServerName server, final HRegionInfo region)
throws IOException {
OpenRegionRequest request =
- RequestConverter.buildOpenRegionRequest(server, region, null, null);
+ RequestConverter.buildOpenRegionRequest(server, region, null, null, null);
try {
admin.openRegion(controller, request);
} catch (ServiceException se) {
@@ -3316,11 +3316,11 @@ public final class ProtobufUtil {
*/
public static CloseRegionRequest buildCloseRegionRequest(ServerName server,
final byte[] regionName) {
- return ProtobufUtil.buildCloseRegionRequest(server, regionName, null);
+ return ProtobufUtil.buildCloseRegionRequest(server, regionName, null, null);
}
public static CloseRegionRequest buildCloseRegionRequest(ServerName server,
- final byte[] regionName, ServerName destinationServer) {
+ final byte[] regionName, ServerName destinationServer, Long masterClockTime) {
CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder();
RegionSpecifier region = RequestConverter.buildRegionSpecifier(
RegionSpecifierType.REGION_NAME, regionName);
@@ -3331,6 +3331,9 @@ public final class ProtobufUtil {
if (server != null) {
builder.setServerStartCode(server.getStartcode());
}
+ if (masterClockTime != null) {
+ builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(masterClockTime));
+ }
return builder.build();
}
@@ -3341,7 +3344,8 @@ public final class ProtobufUtil {
* @return a CloseRegionRequest
*/
public static CloseRegionRequest
- buildCloseRegionRequest(ServerName server, final String encodedRegionName) {
+ buildCloseRegionRequest(ServerName server, final String encodedRegionName,
+ Long masterClockTime) {
CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder();
RegionSpecifier region = RequestConverter.buildRegionSpecifier(
RegionSpecifierType.ENCODED_REGION_NAME,
@@ -3350,6 +3354,9 @@ public final class ProtobufUtil {
if (server != null) {
builder.setServerStartCode(server.getStartcode());
}
+ if (masterClockTime != null) {
+ builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(masterClockTime));
+ }
return builder.build();
}
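For reference, a hedged usage sketch of the updated close-request builder; the
server name, encoded region name, and timestamps are made-up sample values, not
from the patch:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;

public class CloseRequestSketch {
  public static void main(String[] args) {
    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1501633471000L);

    // Passing a master clock time populates the new optional nodeTime field.
    CloseRegionRequest withTime =
        ProtobufUtil.buildCloseRegionRequest(sn, "0123456789abcdef", 1501633471123L);
    System.out.println(withTime.hasNodeTime());           // true
    System.out.println(withTime.getNodeTime().getTime()); // 1501633471123

    // Passing null leaves the field unset, so old-style callers are unaffected.
    CloseRegionRequest withoutTime =
        ProtobufUtil.buildCloseRegionRequest(sn, "0123456789abcdef", null);
    System.out.println(withoutTime.hasNodeTime());        // false
  }
}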
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index e84a85f..52ce3e9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -877,11 +878,12 @@ public final class RequestConverter {
* @param server the serverName for the RPC
* @param regionOpenInfos info of a list of regions to open
* @param openForReplay
+ * @param masterClockTime timestamp generated by master's clock
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest
buildOpenRegionRequest(ServerName server, final List<Pair<HRegionInfo,
- List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
+ List<ServerName>>> regionOpenInfos, Boolean openForReplay, Long masterClockTime) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
for (Pair<HRegionInfo, List<ServerName>> regionOpenInfo: regionOpenInfos) {
builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(),
@@ -890,6 +892,9 @@ public final class RequestConverter {
if (server != null) {
builder.setServerStartCode(server.getStartcode());
}
+ if (masterClockTime != null) {
+ builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(masterClockTime));
+ }
// send the master's wall clock time as well, so that the RS can refer to it
builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
return builder.build();
@@ -902,11 +907,12 @@ public final class RequestConverter {
* @param region the region to open
* @param favoredNodes
* @param openForReplay
+ * @param masterClockTime timestamp generated by master's clock
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest buildOpenRegionRequest(ServerName server,
final HRegionInfo region, List<ServerName> favoredNodes,
- Boolean openForReplay) {
+ Boolean openForReplay, Long masterClockTime) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes,
openForReplay));
@@ -914,6 +920,9 @@ public final class RequestConverter {
builder.setServerStartCode(server.getStartcode());
}
builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
+ if (masterClockTime != null) {
+ builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(masterClockTime));
+ }
return builder.build();
}
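The master clock time is a boxed Long rather than a primitive so that callers
without a clock reading can simply pass null. A short sketch of the updated
single-region overload follows; the table, server, and timestamp values are
made-up, and HRegionInfo is built with its TableName constructor purely for
illustration:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;

public class OpenRequestSketch {
  public static void main(String[] args) {
    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1501633471000L);
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("t1"));

    // With a clock reading: nodeTime is set alongside master_system_time.
    OpenRegionRequest withTime = RequestConverter.buildOpenRegionRequest(
        sn, hri, null /* favoredNodes */, null /* openForReplay */, 1501633471123L);
    // Without one: the optional field stays unset.
    OpenRegionRequest withoutTime = RequestConverter.buildOpenRegionRequest(
        sn, hri, null, null, null);

    System.out.println(withTime.hasNodeTime());    // true
    System.out.println(withoutTime.hasNodeTime()); // false
  }
}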
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-common/src/main/java/org/apache/hadoop/hbase/Clock.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Clock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Clock.java
index abc6252..0e2320f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Clock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Clock.java
@@ -365,6 +365,12 @@ public interface Clock {
this.physicalTime = physicalTime;
}
+ @VisibleForTesting
+ synchronized long getLogicalTime() { return logicalTime; }
+
+ @VisibleForTesting
+ synchronized long getPhysicalTime() { return physicalTime; }
+
@Override
public TimestampType getTimestampType() { return timestampType; }
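The hunk above only shows the fields, so the enclosing class is not visible here.
Assuming the accessors land on the hybrid clock implementation this branch
(HBASE-14070.HLC) is built around, a test in the same package could now assert on
clock internals directly. A rough sketch, with the Clock.HLC constructor and the
now() call both assumptions rather than facts from this diff:

package org.apache.hadoop.hbase; // same package: the new accessors are package-private

public class ClockAccessorSketch {
  public static void main(String[] args) {
    Clock.HLC hlc = new Clock.HLC(); // assumed no-arg constructor
    hlc.now();                       // assumed tick; advances the clock
    // The new @VisibleForTesting accessors make both components observable.
    System.out.println("physical = " + hlc.getPhysicalTime());
    System.out.println("logical  = " + hlc.getLogicalTime());
  }
}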
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 639be5a..f739de6 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -4539,6 +4539,31 @@ public final class AdminProtos {
* <code>optional uint64 master_system_time = 5;</code>
*/
long getMasterSystemTime();
+
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ boolean hasNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.OpenRegionRequest}
@@ -4604,6 +4629,19 @@ public final class AdminProtos {
masterSystemTime_ = input.readUInt64();
break;
}
+ case 50: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = nodeTime_.toBuilder();
+ }
+ nodeTime_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(nodeTime_);
+ nodeTime_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
}
}
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -5890,6 +5928,39 @@ public final class AdminProtos {
return masterSystemTime_;
}
+ public static final int NODETIME_FIELD_NUMBER = 6;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
@@ -5917,6 +5988,9 @@ public final class AdminProtos {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(5, masterSystemTime_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(6, getNodeTime());
+ }
unknownFields.writeTo(output);
}
@@ -5937,6 +6011,10 @@ public final class AdminProtos {
size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, masterSystemTime_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(6, getNodeTime());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -5966,6 +6044,11 @@ public final class AdminProtos {
result = result && (getMasterSystemTime()
== other.getMasterSystemTime());
}
+ result = result && (hasNodeTime() == other.hasNodeTime());
+ if (hasNodeTime()) {
+ result = result && getNodeTime()
+ .equals(other.getNodeTime());
+ }
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@@ -5991,6 +6074,10 @@ public final class AdminProtos {
hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
getMasterSystemTime());
}
+ if (hasNodeTime()) {
+ hash = (37 * hash) + NODETIME_FIELD_NUMBER;
+ hash = (53 * hash) + getNodeTime().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -6106,6 +6193,7 @@ public final class AdminProtos {
if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getOpenInfoFieldBuilder();
+ getNodeTimeFieldBuilder();
}
}
public Builder clear() {
@@ -6120,6 +6208,12 @@ public final class AdminProtos {
bitField0_ = (bitField0_ & ~0x00000002);
masterSystemTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@@ -6161,6 +6255,14 @@ public final class AdminProtos {
to_bitField0_ |= 0x00000002;
}
result.masterSystemTime_ = masterSystemTime_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (nodeTimeBuilder_ == null) {
+ result.nodeTime_ = nodeTime_;
+ } else {
+ result.nodeTime_ = nodeTimeBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -6235,6 +6337,9 @@ public final class AdminProtos {
if (other.hasMasterSystemTime()) {
setMasterSystemTime(other.getMasterSystemTime());
}
+ if (other.hasNodeTime()) {
+ mergeNodeTime(other.getNodeTime());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -6603,6 +6708,160 @@ public final class AdminProtos {
onChanged();
return this;
}
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder> nodeTimeBuilder_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ } else {
+ return nodeTimeBuilder_.getMessage();
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder setNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ nodeTime_ = value;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder setNodeTime(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder builderForValue) {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = builderForValue.build();
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder mergeNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ nodeTime_ != null &&
+ nodeTime_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance()) {
+ nodeTime_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.newBuilder(nodeTime_).mergeFrom(value).buildPartial();
+ } else {
+ nodeTime_ = value;
+ }
+ onChanged();
+ } else {
+ nodeTimeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder clearNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder getNodeTimeBuilder() {
+ bitField0_ |= 0x00000008;
+ onChanged();
+ return getNodeTimeFieldBuilder().getBuilder();
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ if (nodeTimeBuilder_ != null) {
+ return nodeTimeBuilder_.getMessageOrBuilder();
+ } else {
+ return nodeTime_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>
+ getNodeTimeFieldBuilder() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTimeBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>(
+ getNodeTime(),
+ getParentForChildren(),
+ isClean());
+ nodeTime_ = null;
+ }
+ return nodeTimeBuilder_;
+ }
public final Builder setUnknownFields(
final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
@@ -6668,6 +6927,31 @@ public final class AdminProtos {
* <code>repeated .hbase.pb.OpenRegionResponse.RegionOpeningState opening_state = 1;</code>
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState getOpeningState(int index);
+
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ boolean hasNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.OpenRegionResponse}
@@ -6745,6 +7029,19 @@ public final class AdminProtos {
input.popLimit(oldLimit);
break;
}
+ case 18: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = nodeTime_.toBuilder();
+ }
+ nodeTime_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(nodeTime_);
+ nodeTime_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
}
}
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -6871,6 +7168,7 @@ public final class AdminProtos {
// @@protoc_insertion_point(enum_scope:hbase.pb.OpenRegionResponse.RegionOpeningState)
}
+ private int bitField0_;
public static final int OPENING_STATE_FIELD_NUMBER = 1;
private java.util.List<java.lang.Integer> openingState_;
private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.ListAdapter.Converter<
@@ -6902,6 +7200,39 @@ public final class AdminProtos {
return openingState_converter_.convert(openingState_.get(index));
}
+ public static final int NODETIME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
@@ -6917,6 +7248,9 @@ public final class AdminProtos {
for (int i = 0; i < openingState_.size(); i++) {
output.writeEnum(1, openingState_.get(i));
}
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(2, getNodeTime());
+ }
unknownFields.writeTo(output);
}
@@ -6934,6 +7268,10 @@ public final class AdminProtos {
size += dataSize;
size += 1 * openingState_.size();
}
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, getNodeTime());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -6952,6 +7290,11 @@ public final class AdminProtos {
boolean result = true;
result = result && openingState_.equals(other.openingState_);
+ result = result && (hasNodeTime() == other.hasNodeTime());
+ if (hasNodeTime()) {
+ result = result && getNodeTime()
+ .equals(other.getNodeTime());
+ }
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@@ -6967,6 +7310,10 @@ public final class AdminProtos {
hash = (37 * hash) + OPENING_STATE_FIELD_NUMBER;
hash = (53 * hash) + openingState_.hashCode();
}
+ if (hasNodeTime()) {
+ hash = (37 * hash) + NODETIME_FIELD_NUMBER;
+ hash = (53 * hash) + getNodeTime().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -7081,12 +7428,19 @@ public final class AdminProtos {
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
+ getNodeTimeFieldBuilder();
}
}
public Builder clear() {
super.clear();
openingState_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@@ -7110,11 +7464,21 @@ public final class AdminProtos {
public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse buildPartial() {
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse(this);
int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
openingState_ = java.util.Collections.unmodifiableList(openingState_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.openingState_ = openingState_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (nodeTimeBuilder_ == null) {
+ result.nodeTime_ = nodeTime_;
+ } else {
+ result.nodeTime_ = nodeTimeBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -7166,6 +7530,9 @@ public final class AdminProtos {
}
onChanged();
}
+ if (other.hasNodeTime()) {
+ mergeNodeTime(other.getNodeTime());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -7267,42 +7634,196 @@ public final class AdminProtos {
onChanged();
return this;
}
- public final Builder setUnknownFields(
- final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
- return super.setUnknownFields(unknownFields);
- }
-
- public final Builder mergeUnknownFields(
- final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
- return super.mergeUnknownFields(unknownFields);
- }
-
- // @@protoc_insertion_point(builder_scope:hbase.pb.OpenRegionResponse)
- }
-
- // @@protoc_insertion_point(class_scope:hbase.pb.OpenRegionResponse)
- private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse DEFAULT_INSTANCE;
- static {
- DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse();
- }
-
- public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse getDefaultInstance() {
- return DEFAULT_INSTANCE;
- }
-
- @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<OpenRegionResponse>
- PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<OpenRegionResponse>() {
- public OpenRegionResponse parsePartialFrom(
- org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
- org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
- return new OpenRegionResponse(input, extensionRegistry);
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder> nodeTimeBuilder_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
- };
-
- public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<OpenRegionResponse> parser() {
- return PARSER;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ } else {
+ return nodeTimeBuilder_.getMessage();
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder setNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ nodeTime_ = value;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder setNodeTime(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder builderForValue) {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = builderForValue.build();
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder mergeNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ nodeTime_ != null &&
+ nodeTime_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance()) {
+ nodeTime_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.newBuilder(nodeTime_).mergeFrom(value).buildPartial();
+ } else {
+ nodeTime_ = value;
+ }
+ onChanged();
+ } else {
+ nodeTimeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder clearNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder getNodeTimeBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getNodeTimeFieldBuilder().getBuilder();
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ if (nodeTimeBuilder_ != null) {
+ return nodeTimeBuilder_.getMessageOrBuilder();
+ } else {
+ return nodeTime_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>
+ getNodeTimeFieldBuilder() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTimeBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>(
+ getNodeTime(),
+ getParentForChildren(),
+ isClean());
+ nodeTime_ = null;
+ }
+ return nodeTimeBuilder_;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.OpenRegionResponse)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.OpenRegionResponse)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<OpenRegionResponse>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<OpenRegionResponse>() {
+ public OpenRegionResponse parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new OpenRegionResponse(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<OpenRegionResponse> parser() {
+ return PARSER;
}
@java.lang.Override
@@ -8345,6 +8866,31 @@ public final class AdminProtos {
* <code>optional uint64 serverStartCode = 5;</code>
*/
long getServerStartCode();
+
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ boolean hasNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder();
}
/**
* <pre>
@@ -8438,6 +8984,19 @@ public final class AdminProtos {
serverStartCode_ = input.readUInt64();
break;
}
+ case 50: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ subBuilder = nodeTime_.toBuilder();
+ }
+ nodeTime_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(nodeTime_);
+ nodeTime_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000020;
+ break;
+ }
}
}
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -8558,6 +9117,39 @@ public final class AdminProtos {
return serverStartCode_;
}
+ public static final int NODETIME_FIELD_NUMBER = 6;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
@@ -8599,6 +9191,9 @@ public final class AdminProtos {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, serverStartCode_);
}
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeMessage(6, getNodeTime());
+ }
unknownFields.writeTo(output);
}
@@ -8627,6 +9222,10 @@ public final class AdminProtos {
size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, serverStartCode_);
}
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(6, getNodeTime());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -8669,6 +9268,11 @@ public final class AdminProtos {
result = result && (getServerStartCode()
== other.getServerStartCode());
}
+ result = result && (hasNodeTime() == other.hasNodeTime());
+ if (hasNodeTime()) {
+ result = result && getNodeTime()
+ .equals(other.getNodeTime());
+ }
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@@ -8702,6 +9306,10 @@ public final class AdminProtos {
hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
getServerStartCode());
}
+ if (hasNodeTime()) {
+ hash = (37 * hash) + NODETIME_FIELD_NUMBER;
+ hash = (53 * hash) + getNodeTime().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -8824,6 +9432,7 @@ public final class AdminProtos {
.alwaysUseFieldBuilders) {
getRegionFieldBuilder();
getDestinationServerFieldBuilder();
+ getNodeTimeFieldBuilder();
}
}
public Builder clear() {
@@ -8846,6 +9455,12 @@ public final class AdminProtos {
bitField0_ = (bitField0_ & ~0x00000008);
serverStartCode_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
@@ -8898,6 +9513,14 @@ public final class AdminProtos {
to_bitField0_ |= 0x00000010;
}
result.serverStartCode_ = serverStartCode_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ if (nodeTimeBuilder_ == null) {
+ result.nodeTime_ = nodeTime_;
+ } else {
+ result.nodeTime_ = nodeTimeBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -8955,6 +9578,9 @@ public final class AdminProtos {
if (other.hasServerStartCode()) {
setServerStartCode(other.getServerStartCode());
}
+ if (other.hasNodeTime()) {
+ mergeNodeTime(other.getNodeTime());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -9341,6 +9967,160 @@ public final class AdminProtos {
onChanged();
return this;
}
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder> nodeTimeBuilder_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ } else {
+ return nodeTimeBuilder_.getMessage();
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder setNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ nodeTime_ = value;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000020;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder setNodeTime(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder builderForValue) {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = builderForValue.build();
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000020;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder mergeNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (((bitField0_ & 0x00000020) == 0x00000020) &&
+ nodeTime_ != null &&
+ nodeTime_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance()) {
+ nodeTime_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.newBuilder(nodeTime_).mergeFrom(value).buildPartial();
+ } else {
+ nodeTime_ = value;
+ }
+ onChanged();
+ } else {
+ nodeTimeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000020;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public Builder clearNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000020);
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder getNodeTimeBuilder() {
+ bitField0_ |= 0x00000020;
+ onChanged();
+ return getNodeTimeFieldBuilder().getBuilder();
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ if (nodeTimeBuilder_ != null) {
+ return nodeTimeBuilder_.getMessageOrBuilder();
+ } else {
+ return nodeTime_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from master clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 6;</code>
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>
+ getNodeTimeFieldBuilder() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTimeBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>(
+ getNodeTime(),
+ getParentForChildren(),
+ isClean());
+ nodeTime_ = null;
+ }
+ return nodeTimeBuilder_;
+ }
public final Builder setUnknownFields(
final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
@@ -9402,6 +10182,31 @@ public final class AdminProtos {
* <code>required bool closed = 1;</code>
*/
boolean getClosed();
+
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ boolean hasNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime();
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.CloseRegionResponse}
@@ -9451,6 +10256,19 @@ public final class AdminProtos {
closed_ = input.readBool();
break;
}
+ case 18: {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = nodeTime_.toBuilder();
+ }
+ nodeTime_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(nodeTime_);
+ nodeTime_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
}
}
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -9491,6 +10309,39 @@ public final class AdminProtos {
return closed_;
}
+ public static final int NODETIME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
@@ -9510,6 +10361,9 @@ public final class AdminProtos {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, closed_);
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, getNodeTime());
+ }
unknownFields.writeTo(output);
}
@@ -9522,6 +10376,10 @@ public final class AdminProtos {
size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
.computeBoolSize(1, closed_);
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, getNodeTime());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -9544,6 +10402,11 @@ public final class AdminProtos {
result = result && (getClosed()
== other.getClosed());
}
+ result = result && (hasNodeTime() == other.hasNodeTime());
+ if (hasNodeTime()) {
+ result = result && getNodeTime()
+ .equals(other.getNodeTime());
+ }
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@@ -9560,6 +10423,10 @@ public final class AdminProtos {
hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
getClosed());
}
+ if (hasNodeTime()) {
+ hash = (37 * hash) + NODETIME_FIELD_NUMBER;
+ hash = (53 * hash) + getNodeTime().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -9674,12 +10541,19 @@ public final class AdminProtos {
private void maybeForceBuilderInitialization() {
if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
+ getNodeTimeFieldBuilder();
}
}
public Builder clear() {
super.clear();
closed_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
@@ -9708,6 +10582,14 @@ public final class AdminProtos {
to_bitField0_ |= 0x00000001;
}
result.closed_ = closed_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (nodeTimeBuilder_ == null) {
+ result.nodeTime_ = nodeTime_;
+ } else {
+ result.nodeTime_ = nodeTimeBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -9753,6 +10635,9 @@ public final class AdminProtos {
if (other.hasClosed()) {
setClosed(other.getClosed());
}
+ if (other.hasNodeTime()) {
+ mergeNodeTime(other.getNodeTime());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -9815,6 +10700,160 @@ public final class AdminProtos {
onChanged();
return this;
}
+
+ private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime nodeTime_ = null;
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder> nodeTimeBuilder_;
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public boolean hasNodeTime() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ return nodeTime_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ } else {
+ return nodeTimeBuilder_.getMessage();
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder setNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ nodeTime_ = value;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder setNodeTime(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder builderForValue) {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = builderForValue.build();
+ onChanged();
+ } else {
+ nodeTimeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder mergeNodeTime(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime value) {
+ if (nodeTimeBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ nodeTime_ != null &&
+ nodeTime_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance()) {
+ nodeTime_ =
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.newBuilder(nodeTime_).mergeFrom(value).buildPartial();
+ } else {
+ nodeTime_ = value;
+ }
+ onChanged();
+ } else {
+ nodeTimeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public Builder clearNodeTime() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTime_ = null;
+ onChanged();
+ } else {
+ nodeTimeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder getNodeTimeBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getNodeTimeFieldBuilder().getBuilder();
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder getNodeTimeOrBuilder() {
+ if (nodeTimeBuilder_ != null) {
+ return nodeTimeBuilder_.getMessageOrBuilder();
+ } else {
+ return nodeTime_ == null ?
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance() : nodeTime_;
+ }
+ }
+ /**
+ * <pre>
+ * physical or hybrid timestamp from region server clock
+ * </pre>
+ *
+ * <code>optional .hbase.pb.NodeTime nodeTime = 2;</code>
+ */
+ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>
+ getNodeTimeFieldBuilder() {
+ if (nodeTimeBuilder_ == null) {
+ nodeTimeBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder>(
+ getNodeTime(),
+ getParentForChildren(),
+ isClean());
+ nodeTime_ = null;
+ }
+ return nodeTimeBuilder_;
+ }
public final Builder setUnknownFields(
final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
@@ -30447,125 +31486,129 @@ public final class AdminProtos {
"ponse\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineRe" +
"gionRequest\"D\n\027GetOnlineRegionResponse\022)" +
"\n\013region_info\030\001 \003(\0132\024.hbase.pb.RegionInf" +
- "o\"\263\002\n\021OpenRegionRequest\022=\n\topen_info\030\001 \003" +
+ "o\"\331\002\n\021OpenRegionRequest\022=\n\topen_info\030\001 \003" +
"(\0132*.hbase.pb.OpenRegionRequest.RegionOp",
"enInfo\022\027\n\017serverStartCode\030\002 \001(\004\022\032\n\022maste" +
- "r_system_time\030\005 \001(\004\032\251\001\n\016RegionOpenInfo\022$" +
- "\n\006region\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027" +
- "version_of_offline_node\030\002 \001(\r\022+\n\rfavored" +
- "_nodes\030\003 \003(\0132\024.hbase.pb.ServerName\022#\n\033op" +
- "enForDistributedLogReplay\030\004 \001(\010\"\246\001\n\022Open" +
- "RegionResponse\022F\n\ropening_state\030\001 \003(\0162/." +
- "hbase.pb.OpenRegionResponse.RegionOpenin" +
- "gState\"H\n\022RegionOpeningState\022\n\n\006OPENED\020\000" +
- "\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002",
- "\"?\n\023WarmupRegionRequest\022(\n\nregionInfo\030\001 " +
- "\002(\0132\024.hbase.pb.RegionInfo\"\026\n\024WarmupRegio" +
- "nResponse\"\313\001\n\022CloseRegionRequest\022)\n\006regi" +
- "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\037\n\027v" +
- "ersion_of_closing_node\030\002 \001(\r\022\036\n\020transiti" +
- "on_in_ZK\030\003 \001(\010:\004true\0220\n\022destination_serv" +
- "er\030\004 \001(\0132\024.hbase.pb.ServerName\022\027\n\017server" +
- "StartCode\030\005 \001(\004\"%\n\023CloseRegionResponse\022\016" +
- "\n\006closed\030\001 \002(\010\"y\n\022FlushRegionRequest\022)\n\006" +
- "region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022",
- "\030\n\020if_older_than_ts\030\002 \001(\004\022\036\n\026write_flush" +
- "_wal_marker\030\003 \001(\010\"_\n\023FlushRegionResponse" +
- "\022\027\n\017last_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001" +
- "(\010\022\036\n\026wrote_flush_wal_marker\030\003 \001(\010\"T\n\022Sp" +
- "litRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase" +
- ".pb.RegionSpecifier\022\023\n\013split_point\030\002 \001(\014" +
- "\"\025\n\023SplitRegionResponse\"`\n\024CompactRegion" +
- "Request\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" +
- "nSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(" +
- "\014\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFav",
- "oredNodesRequest\022I\n\013update_info\030\001 \003(\01324." +
- "hbase.pb.UpdateFavoredNodesRequest.Regio" +
- "nUpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006regio" +
- "n\030\001 \002(\0132\024.hbase.pb.RegionInfo\022+\n\rfavored" +
- "_nodes\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032Up" +
- "dateFavoredNodesResponse\022\020\n\010response\030\001 \001" +
- "(\r\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.W" +
- "ALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025associ" +
- "ated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEn" +
- "tryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WAL",
- "Entry\022\034\n\024replicationClusterId\030\002 \001(\t\022\"\n\032s" +
- "ourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031sourc" +
- "eHFileArchiveDirPath\030\004 \001(\t\"\033\n\031ReplicateW" +
- "ALEntryResponse\"\026\n\024RollWALWriterRequest\"" +
- "0\n\025RollWALWriterResponse\022\027\n\017region_to_fl" +
- "ush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason" +
- "\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServe" +
- "rInfoRequest\"K\n\nServerInfo\022)\n\013server_nam" +
- "e\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nwebui_p" +
- "ort\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)\n\013se",
- "rver_info\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n" +
- "\032UpdateConfigurationRequest\"\035\n\033UpdateCon" +
- "figurationResponse\"?\n\024GetRegionLoadReque" +
- "st\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb.TableN" +
- "ame\"C\n\025GetRegionLoadResponse\022*\n\014region_l" +
- "oads\030\001 \003(\0132\024.hbase.pb.RegionLoad\"2\n\034Clea" +
- "rCompactionQueuesRequest\022\022\n\nqueue_name\030\001" +
- " \003(\t\"\037\n\035ClearCompactionQueuesResponse\"\200\001" +
- "\n\030ExecuteProceduresRequest\0220\n\013open_regio" +
- "n\030\001 \003(\0132\033.hbase.pb.OpenRegionRequest\0222\n\014",
- "close_region\030\002 \003(\0132\034.hbase.pb.CloseRegio" +
- "nRequest\"\203\001\n\031ExecuteProceduresResponse\0221" +
- "\n\013open_region\030\001 \003(\0132\034.hbase.pb.OpenRegio" +
- "nResponse\0223\n\014close_region\030\002 \003(\0132\035.hbase." +
- "pb.CloseRegionResponse\"\244\001\n\023MergeRegionsR" +
- "equest\022+\n\010region_a\030\001 \002(\0132\031.hbase.pb.Regi" +
- "onSpecifier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb" +
- ".RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005fals" +
- "e\022\032\n\022master_system_time\030\004 \001(\004\"\026\n\024MergeRe" +
- "gionsResponse2\216\016\n\014AdminService\022P\n\rGetReg",
- "ionInfo\022\036.hbase.pb.GetRegionInfoRequest\032" +
- "\037.hbase.pb.GetRegionInfoResponse\022M\n\014GetS" +
- "toreFile\022\035.hbase.pb.GetStoreFileRequest\032" +
- "\036.hbase.pb.GetStoreFileResponse\022V\n\017GetOn" +
- "lineRegion\022 .hbase.pb.GetOnlineRegionReq" +
- "uest\032!.hbase.pb.GetOnlineRegionResponse\022" +
- "G\n\nOpenRegion\022\033.hbase.pb.OpenRegionReque" +
- "st\032\034.hbase.pb.OpenRegionResponse\022M\n\014Warm" +
- "upRegion\022\035.hbase.pb.WarmupRegionRequest\032" +
- "\036.hbase.pb.WarmupRegionResponse\022J\n\013Close",
- "Region\022\034.hbase.pb.CloseRegionRequest\032\035.h" +
- "base.pb.CloseRegionResponse\022J\n\013FlushRegi" +
- "on\022\034.hbase.pb.FlushRegionRequest\032\035.hbase" +
- ".pb.FlushRegionResponse\022J\n\013SplitRegion\022\034" +
- ".hbase.pb.SplitRegionRequest\032\035.hbase.pb." +
- "SplitRegionResponse\022P\n\rCompactRegion\022\036.h" +
- "base.pb.CompactRegionRequest\032\037.hbase.pb." +
- "CompactRegionResponse\022\\\n\021ReplicateWALEnt" +
- "ry\022\".hbase.pb.ReplicateWALEntryRequest\032#" +
- ".hbase.pb.ReplicateWALEntryResponse\022Q\n\006R",
- "eplay\022\".hbase.pb.ReplicateWALEntryReques" +
- "t\032#.hbase.pb.ReplicateWALEntryResponse\022P" +
- "\n\rRollWALWriter\022\036.hbase.pb.RollWALWriter" +
- "Request\032\037.hbase.pb.RollWALWriterResponse" +
- "\022P\n\rGetServerInfo\022\036.hbase.pb.GetServerIn" +
- "foRequest\032\037.hbase.pb.GetServerInfoRespon" +
- "se\022G\n\nStopServer\022\033.hbase.pb.StopServerRe" +
- "quest\032\034.hbase.pb.StopServerResponse\022_\n\022U" +
- "pdateFavoredNodes\022#.hbase.pb.UpdateFavor" +
- "edNodesRequest\032$.hbase.pb.UpdateFavoredN",
- "odesResponse\022b\n\023UpdateConfiguration\022$.hb" +
- "ase.pb.UpdateConfigurationRequest\032%.hbas" +
- "e.pb.UpdateConfigurationResponse\022P\n\rGetR" +
- "egionLoad\022\036.hbase.pb.GetRegionLoadReques" +
- "t\032\037.hbase.pb.GetRegionLoadResponse\022h\n\025Cl" +
- "earCompactionQueues\022&.hbase.pb.ClearComp" +
- "actionQueuesRequest\032\'.hbase.pb.ClearComp" +
- "actionQueuesResponse\022k\n\026GetSpaceQuotaSna" +
- "pshots\022\'.hbase.pb.GetSpaceQuotaSnapshots" +
- "Request\032(.hbase.pb.GetSpaceQuotaSnapshot",
- "sResponse\022\\\n\021ExecuteProcedures\022\".hbase.p" +
- "b.ExecuteProceduresRequest\032#.hbase.pb.Ex" +
- "ecuteProceduresResponse\022M\n\014MergeRegions\022" +
- "\035.hbase.pb.MergeRegionsRequest\032\036.hbase.p" +
- "b.MergeRegionsResponseBH\n1org.apache.had" +
- "oop.hbase.shaded.protobuf.generatedB\013Adm" +
- "inProtosH\001\210\001\001\240\001\001"
+ "r_system_time\030\005 \001(\004\022$\n\010nodeTime\030\006 \001(\0132\022." +
+ "hbase.pb.NodeTime\032\251\001\n\016RegionOpenInfo\022$\n\006" +
+ "region\030\001 \002(\0132\024.hbase.pb.RegionInfo\022\037\n\027ve" +
+ "rsion_of_offline_node\030\002 \001(\r\022+\n\rfavored_n" +
+ "odes\030\003 \003(\0132\024.hbase.pb.ServerName\022#\n\033open" +
+ "ForDistributedLogReplay\030\004 \001(\010\"\314\001\n\022OpenRe" +
+ "gionResponse\022F\n\ropening_state\030\001 \003(\0162/.hb" +
+ "ase.pb.OpenRegionResponse.RegionOpeningS" +
+ "tate\022$\n\010nodeTime\030\002 \001(\0132\022.hbase.pb.NodeTi",
+ "me\"H\n\022RegionOpeningState\022\n\n\006OPENED\020\000\022\022\n\016" +
+ "ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"?\n\023" +
+ "WarmupRegionRequest\022(\n\nregionInfo\030\001 \002(\0132" +
+ "\024.hbase.pb.RegionInfo\"\026\n\024WarmupRegionRes" +
+ "ponse\"\361\001\n\022CloseRegionRequest\022)\n\006region\030\001" +
+ " \002(\0132\031.hbase.pb.RegionSpecifier\022\037\n\027versi" +
+ "on_of_closing_node\030\002 \001(\r\022\036\n\020transition_i" +
+ "n_ZK\030\003 \001(\010:\004true\0220\n\022destination_server\030\004" +
+ " \001(\0132\024.hbase.pb.ServerName\022\027\n\017serverStar" +
+ "tCode\030\005 \001(\004\022$\n\010nodeTime\030\006 \001(\0132\022.hbase.pb",
+ ".NodeTime\"K\n\023CloseRegionResponse\022\016\n\006clos" +
+ "ed\030\001 \002(\010\022$\n\010nodeTime\030\002 \001(\0132\022.hbase.pb.No" +
+ "deTime\"y\n\022FlushRegionRequest\022)\n\006region\030\001" +
+ " \002(\0132\031.hbase.pb.RegionSpecifier\022\030\n\020if_ol" +
+ "der_than_ts\030\002 \001(\004\022\036\n\026write_flush_wal_mar" +
+ "ker\030\003 \001(\010\"_\n\023FlushRegionResponse\022\027\n\017last" +
+ "_flush_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026wr" +
+ "ote_flush_wal_marker\030\003 \001(\010\"T\n\022SplitRegio" +
+ "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" +
+ "onSpecifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023Spli",
+ "tRegionResponse\"`\n\024CompactRegionRequest\022" +
+ ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
+ "er\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025Com" +
+ "pactRegionResponse\"\315\001\n\031UpdateFavoredNode" +
+ "sRequest\022I\n\013update_info\030\001 \003(\01324.hbase.pb" +
+ ".UpdateFavoredNodesRequest.RegionUpdateI" +
+ "nfo\032e\n\020RegionUpdateInfo\022$\n\006region\030\001 \002(\0132" +
+ "\024.hbase.pb.RegionInfo\022+\n\rfavored_nodes\030\002" +
+ " \003(\0132\024.hbase.pb.ServerName\".\n\032UpdateFavo" +
+ "redNodesResponse\022\020\n\010response\030\001 \001(\r\"a\n\010WA",
+ "LEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n" +
+ "\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel" +
+ "l_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryReque" +
+ "st\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n" +
+ "\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceBas" +
+ "eNamespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileAr" +
+ "chiveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryR" +
+ "esponse\"\026\n\024RollWALWriterRequest\"0\n\025RollW" +
+ "ALWriterResponse\022\027\n\017region_to_flush\030\001 \003(" +
+ "\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024",
+ "\n\022StopServerResponse\"\026\n\024GetServerInfoReq" +
+ "uest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132" +
+ "\024.hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(" +
+ "\r\"B\n\025GetServerInfoResponse\022)\n\013server_inf" +
+ "o\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateC" +
+ "onfigurationRequest\"\035\n\033UpdateConfigurati" +
+ "onResponse\"?\n\024GetRegionLoadRequest\022\'\n\nta" +
+ "ble_name\030\001 \001(\0132\023.hbase.pb.TableName\"C\n\025G" +
+ "etRegionLoadResponse\022*\n\014region_loads\030\001 \003" +
+ "(\0132\024.hbase.pb.RegionLoad\"2\n\034ClearCompact",
+ "ionQueuesRequest\022\022\n\nqueue_name\030\001 \003(\t\"\037\n\035" +
+ "ClearCompactionQueuesResponse\"\200\001\n\030Execut" +
+ "eProceduresRequest\0220\n\013open_region\030\001 \003(\0132" +
+ "\033.hbase.pb.OpenRegionRequest\0222\n\014close_re" +
+ "gion\030\002 \003(\0132\034.hbase.pb.CloseRegionRequest" +
+ "\"\203\001\n\031ExecuteProceduresResponse\0221\n\013open_r" +
+ "egion\030\001 \003(\0132\034.hbase.pb.OpenRegionRespons" +
+ "e\0223\n\014close_region\030\002 \003(\0132\035.hbase.pb.Close" +
+ "RegionResponse\"\244\001\n\023MergeRegionsRequest\022+" +
+ "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif",
+ "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" +
+ "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022mas" +
+ "ter_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRes" +
+ "ponse2\216\016\n\014AdminService\022P\n\rGetRegionInfo\022" +
+ "\036.hbase.pb.GetRegionInfoRequest\032\037.hbase." +
+ "pb.GetRegionInfoResponse\022M\n\014GetStoreFile" +
+ "\022\035.hbase.pb.GetStoreFileRequest\032\036.hbase." +
+ "pb.GetStoreFileResponse\022V\n\017GetOnlineRegi" +
+ "on\022 .hbase.pb.GetOnlineRegionRequest\032!.h" +
+ "base.pb.GetOnlineRegionResponse\022G\n\nOpenR",
+ "egion\022\033.hbase.pb.OpenRegionRequest\032\034.hba" +
+ "se.pb.OpenRegionResponse\022M\n\014WarmupRegion" +
+ "\022\035.hbase.pb.WarmupRegionRequest\032\036.hbase." +
+ "pb.WarmupRegionResponse\022J\n\013CloseRegion\022\034" +
+ ".hbase.pb.CloseRegionRequest\032\035.hbase.pb." +
+ "CloseRegionResponse\022J\n\013FlushRegion\022\034.hba" +
+ "se.pb.FlushRegionRequest\032\035.hbase.pb.Flus" +
+ "hRegionResponse\022J\n\013SplitRegion\022\034.hbase.p" +
+ "b.SplitRegionRequest\032\035.hbase.pb.SplitReg" +
+ "ionResponse\022P\n\rCompactRegion\022\036.hbase.pb.",
+ "CompactRegionRequest\032\037.hbase.pb.CompactR" +
+ "egionResponse\022\\\n\021ReplicateWALEntry\022\".hba" +
+ "se.pb.ReplicateWALEntryRequest\032#.hbase.p" +
+ "b.ReplicateWALEntryResponse\022Q\n\006Replay\022\"." +
+ "hbase.pb.ReplicateWALEntryRequest\032#.hbas" +
+ "e.pb.ReplicateWALEntryResponse\022P\n\rRollWA" +
+ "LWriter\022\036.hbase.pb.RollWALWriterRequest\032" +
+ "\037.hbase.pb.RollWALWriterResponse\022P\n\rGetS" +
+ "erverInfo\022\036.hbase.pb.GetServerInfoReques" +
+ "t\032\037.hbase.pb.GetServerInfoResponse\022G\n\nSt",
+ "opServer\022\033.hbase.pb.StopServerRequest\032\034." +
+ "hbase.pb.StopServerResponse\022_\n\022UpdateFav" +
+ "oredNodes\022#.hbase.pb.UpdateFavoredNodesR" +
+ "equest\032$.hbase.pb.UpdateFavoredNodesResp" +
+ "onse\022b\n\023UpdateConfiguration\022$.hbase.pb.U" +
+ "pdateConfigurationRequest\032%.hbase.pb.Upd" +
+ "ateConfigurationResponse\022P\n\rGetRegionLoa" +
+ "d\022\036.hbase.pb.GetRegionLoadRequest\032\037.hbas" +
+ "e.pb.GetRegionLoadResponse\022h\n\025ClearCompa" +
+ "ctionQueues\022&.hbase.pb.ClearCompactionQu",
+ "euesRequest\032\'.hbase.pb.ClearCompactionQu" +
+ "euesResponse\022k\n\026GetSpaceQuotaSnapshots\022\'" +
+ ".hbase.pb.GetSpaceQuotaSnapshotsRequest\032" +
+ "(.hbase.pb.GetSpaceQuotaSnapshotsRespons" +
+ "e\022\\\n\021ExecuteProcedures\022\".hbase.pb.Execut" +
+ "eProceduresRequest\032#.hbase.pb.ExecutePro" +
+ "ceduresResponse\022M\n\014MergeRegions\022\035.hbase." +
+ "pb.MergeRegionsRequest\032\036.hbase.pb.MergeR" +
+ "egionsResponseBH\n1org.apache.hadoop.hbas" +
+ "e.shaded.protobuf.generatedB\013AdminProtos",
+ "H\001\210\001\001\240\001\001"
};
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@@ -30624,7 +31667,7 @@ public final class AdminProtos {
internal_static_hbase_pb_OpenRegionRequest_fieldAccessorTable = new
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_OpenRegionRequest_descriptor,
- new java.lang.String[] { "OpenInfo", "ServerStartCode", "MasterSystemTime", });
+ new java.lang.String[] { "OpenInfo", "ServerStartCode", "MasterSystemTime", "NodeTime", });
internal_static_hbase_pb_OpenRegionRequest_RegionOpenInfo_descriptor =
internal_static_hbase_pb_OpenRegionRequest_descriptor.getNestedTypes().get(0);
internal_static_hbase_pb_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable = new
@@ -30636,7 +31679,7 @@ public final class AdminProtos {
internal_static_hbase_pb_OpenRegionResponse_fieldAccessorTable = new
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_OpenRegionResponse_descriptor,
- new java.lang.String[] { "OpeningState", });
+ new java.lang.String[] { "OpeningState", "NodeTime", });
internal_static_hbase_pb_WarmupRegionRequest_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hbase_pb_WarmupRegionRequest_fieldAccessorTable = new
@@ -30654,13 +31697,13 @@ public final class AdminProtos {
internal_static_hbase_pb_CloseRegionRequest_fieldAccessorTable = new
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_CloseRegionRequest_descriptor,
- new java.lang.String[] { "Region", "VersionOfClosingNode", "TransitionInZK", "DestinationServer", "ServerStartCode", });
+ new java.lang.String[] { "Region", "VersionOfClosingNode", "TransitionInZK", "DestinationServer", "ServerStartCode", "NodeTime", });
internal_static_hbase_pb_CloseRegionResponse_descriptor =
getDescriptor().getMessageTypes().get(11);
internal_static_hbase_pb_CloseRegionResponse_fieldAccessorTable = new
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_CloseRegionResponse_descriptor,
- new java.lang.String[] { "Closed", });
+ new java.lang.String[] { "Closed", "NodeTime", });
internal_static_hbase_pb_FlushRegionRequest_descriptor =
getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_FlushRegionRequest_fieldAccessorTable = new
[2/3] hbase git commit: HBASE-18395 Update clock on region open and close (revision 5)
Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
index b3b0831..cb1a47c 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
@@ -18997,6 +18997,485 @@ public final class HBaseProtos {
}
+ public interface NodeTimeOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:hbase.pb.NodeTime)
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ boolean hasTime();
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ long getTime();
+ }
+ /**
+ * <pre>
+ **
+ * Used to send a node's timestamp. The timestamp can be interpreted as either a physical or
+ * hybrid timestamp using TimestampType.
+ * </pre>
+ *
+ * Protobuf type {@code hbase.pb.NodeTime}
+ */
+ public static final class NodeTime extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:hbase.pb.NodeTime)
+ NodeTimeOrBuilder {
+ // Use NodeTime.newBuilder() to construct.
+ private NodeTime(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private NodeTime() {
+ time_ = 0L;
+ }
+
+ @java.lang.Override
+ public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private NodeTime(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ int mutable_bitField0_ = 0;
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ time_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.class, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder.class);
+ }
+
+ private int bitField0_;
+ public static final int TIME_FIELD_NUMBER = 1;
+ private long time_;
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ public boolean hasTime() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ public long getTime() {
+ return time_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, time_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, time_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime other = (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime) obj;
+
+ boolean result = true;
+ result = result && (hasTime() == other.hasTime());
+ if (hasTime()) {
+ result = result && (getTime()
+ == other.getTime());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasTime()) {
+ hash = (37 * hash) + TIME_FIELD_NUMBER;
+ hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+ getTime());
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(byte[] data)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+ byte[] data,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseDelimitedFrom(
+ java.io.InputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * <pre>
+ **
+ * Used to send a node's timestamp. The timestamp can be interpreted as either a physical or
+ * hybrid timestamp using TimestampType.
+ * </pre>
+ *
+ * Protobuf type {@code hbase.pb.NodeTime}
+ */
+ public static final class Builder extends
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:hbase.pb.NodeTime)
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder {
+ public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_descriptor;
+ }
+
+ protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.class, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+ .alwaysUseFieldBuilders) {
+ }
+ }
+ public Builder clear() {
+ super.clear();
+ time_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime build() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime buildPartial() {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime result = new org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.time_ = time_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder clone() {
+ return (Builder) super.clone();
+ }
+ public Builder setField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.setField(field, value);
+ }
+ public Builder clearField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return (Builder) super.clearField(field);
+ }
+ public Builder clearOneof(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return (Builder) super.clearOneof(oneof);
+ }
+ public Builder setRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index, Object value) {
+ return (Builder) super.setRepeatedField(field, index, value);
+ }
+ public Builder addRepeatedField(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+ Object value) {
+ return (Builder) super.addRepeatedField(field, value);
+ }
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime) {
+ return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime other) {
+ if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance()) return this;
+ if (other.hasTime()) {
+ setTime(other.getTime());
+ }
+ this.mergeUnknownFields(other.unknownFields);
+ onChanged();
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime) e.getUnfinishedMessage();
+ throw e.unwrapIOException();
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private long time_ ;
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ public boolean hasTime() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ public long getTime() {
+ return time_;
+ }
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ public Builder setTime(long value) {
+ bitField0_ |= 0x00000001;
+ time_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 time = 1;</code>
+ */
+ public Builder clearTime() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ time_ = 0L;
+ onChanged();
+ return this;
+ }
+ public final Builder setUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ public final Builder mergeUnknownFields(
+ final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.NodeTime)
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.NodeTime)
+ private static final org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime();
+ }
+
+ public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<NodeTime>
+ PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<NodeTime>() {
+ public NodeTime parsePartialFrom(
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+ return new NodeTime(input, extensionRegistry);
+ }
+ };
+
+ public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<NodeTime> parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<NodeTime> getParserForType() {
+ return PARSER;
+ }
+
+ public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+
+ }
+
private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_TableName_descriptor;
private static final
@@ -19117,6 +19596,11 @@ public final class HBaseProtos {
private static final
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable;
+ private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_NodeTime_descriptor;
+ private static final
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_hbase_pb_NodeTime_fieldAccessorTable;
public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -19175,14 +19659,15 @@ public final class HBaseProtos {
"\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversion_mino" +
"r\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030" +
"\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb.V" +
- "ersionInfo*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rL",
- "ESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003" +
- "\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005N" +
- "O_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014M" +
- "ICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECON" +
- "DS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007BE" +
- "\n1org.apache.hadoop.hbase.shaded.protobu" +
- "f.generatedB\013HBaseProtosH\001\240\001\001"
+ "ersionInfo\"\030\n\010NodeTime\022\014\n\004time\030\001 \001(\004*r\n\013",
+ "CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001" +
+ "\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR" +
+ "_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Time" +
+ "Unit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022" +
+ "\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTE" +
+ "S\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007BE\n1org.apache.h" +
+ "adoop.hbase.shaded.protobuf.generatedB\013H" +
+ "BaseProtosH\001\240\001\001"
};
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
@@ -19340,6 +19825,12 @@ public final class HBaseProtos {
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_hbase_pb_RegionServerInfo_descriptor,
new java.lang.String[] { "InfoPort", "VersionInfo", });
+ internal_static_hbase_pb_NodeTime_descriptor =
+ getDescriptor().getMessageTypes().get(24);
+ internal_static_hbase_pb_NodeTime_fieldAccessorTable = new
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_hbase_pb_NodeTime_descriptor,
+ new java.lang.String[] { "Time", });
}
// @@protoc_insertion_point(outer_class_scope)
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 6d67c89..7808618 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -81,6 +81,8 @@ message OpenRegionRequest {
optional uint64 serverStartCode = 2;
// wall clock time from master
optional uint64 master_system_time = 5;
+ // physical or hybrid timestamp from master clock
+ optional NodeTime nodeTime = 6;
message RegionOpenInfo {
required RegionInfo region = 1;
@@ -93,6 +95,8 @@ message OpenRegionRequest {
message OpenRegionResponse {
repeated RegionOpeningState opening_state = 1;
+ // physical or hybrid timestamp from region server clock
+ optional NodeTime nodeTime = 2;
enum RegionOpeningState {
OPENED = 0;
@@ -120,10 +124,14 @@ message CloseRegionRequest {
optional ServerName destination_server = 4;
// the intended server for this RPC.
optional uint64 serverStartCode = 5;
+ // physical or hybrid timestamp from master clock
+ optional NodeTime nodeTime = 6;
}
message CloseRegionResponse {
required bool closed = 1;
+ // physical or hybrid timestamp from region server clock
+ optional NodeTime nodeTime = 2;
}
/**
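
Taken together, the Admin.proto changes above thread a clock reading through the
open/close RPCs: the master stamps a NodeTime on the request and the region
server answers with one taken from its own clock (OpenRegionRequest/Response
follow the same pattern as the close pair). Below is a minimal sketch of that
exchange using only the generated builders this patch introduces; the class
name, the literal timestamps, and the omitted RPC plumbing are illustrative
assumptions, not part of the patch.

    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime;

    public class CloseRegionClockSketch {
      public static void main(String[] args) {
        // Master side: attach the master clock's reading to the close request.
        // (The required 'region' field is omitted in this sketch, so we stop at
        // the builder rather than calling build(), which would reject the
        // partially initialized message.)
        CloseRegionRequest.Builder request = CloseRegionRequest.newBuilder()
            .setNodeTime(NodeTime.newBuilder().setTime(1501632000000L));

        // Region server side: report the region server clock back in the response.
        CloseRegionResponse response = CloseRegionResponse.newBuilder()
            .setClosed(true)
            .setNodeTime(NodeTime.newBuilder().setTime(1501632000042L))
            .build();

        // Caller side: nodeTime is optional, so guard before reading it.
        if (response.hasNodeTime()) {
          System.out.println("region server clock = " + response.getNodeTime().getTime());
        }
      }
    }
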
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/protobuf/HBase.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/HBase.proto b/hbase-protocol-shaded/src/main/protobuf/HBase.proto
index 10742ad..ebf9385 100644
--- a/hbase-protocol-shaded/src/main/protobuf/HBase.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/HBase.proto
@@ -235,3 +235,11 @@ message RegionServerInfo {
optional int32 infoPort = 1;
optional VersionInfo version_info = 2;
}
+
+/**
+ * Used to send a node's timestamp. The timestamp can be interpreted as either a physical or
+ * hybrid timestamp using TimestampType.
+ */
+message NodeTime {
+ optional uint64 time = 1;
+}
\ No newline at end of file
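
For reference, a round-trip through the new message using the generated shaded
classes; a sketch only, with an arbitrary wall-clock value standing in for
whatever the master or region server clock would supply.

    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime;

    public class NodeTimeRoundTrip {
      public static void main(String[] args) throws Exception {
        // Serialize a NodeTime carrying a timestamp (physical here; a hybrid
        // clock would pack its logical component into the same uint64).
        NodeTime out = NodeTime.newBuilder()
            .setTime(System.currentTimeMillis())
            .build();
        byte[] wire = out.toByteArray();

        // Parse it back, guarding the optional field before reading it.
        NodeTime in = NodeTime.parseFrom(wire);
        if (in.hasTime()) {
          System.out.println("time = " + in.getTime());
        }
      }
    }
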
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index e25064f..76f6813 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -22636,7 +22636,7 @@ public final class ClientProtos {
* </pre>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder>
getCursorFieldBuilder() {
if (cursorBuilder_ == null) {
cursorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 717ec73..e3bb364 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -982,7 +982,7 @@ public final class QuotaProtos {
public final boolean isInitialized() {
if (!hasTimeUnit()) {
-
+
return false;
}
return true;
@@ -2009,37 +2009,37 @@ public final class QuotaProtos {
public final boolean isInitialized() {
if (hasReqNum()) {
if (!getReqNum().isInitialized()) {
-
+
return false;
}
}
if (hasReqSize()) {
if (!getReqSize().isInitialized()) {
-
+
return false;
}
}
if (hasWriteNum()) {
if (!getWriteNum().isInitialized()) {
-
+
return false;
}
}
if (hasWriteSize()) {
if (!getWriteSize().isInitialized()) {
-
+
return false;
}
}
if (hasReadNum()) {
if (!getReadNum().isInitialized()) {
-
+
return false;
}
}
if (hasReadSize()) {
if (!getReadSize().isInitialized()) {
-
+
return false;
}
}
@@ -2169,7 +2169,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota req_num = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getReqNumFieldBuilder() {
if (reqNumBuilder_ == null) {
reqNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2286,7 +2286,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota req_size = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getReqSizeFieldBuilder() {
if (reqSizeBuilder_ == null) {
reqSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2403,7 +2403,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota write_num = 3;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getWriteNumFieldBuilder() {
if (writeNumBuilder_ == null) {
writeNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2520,7 +2520,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota write_size = 4;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getWriteSizeFieldBuilder() {
if (writeSizeBuilder_ == null) {
writeSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2637,7 +2637,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota read_num = 5;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getReadNumFieldBuilder() {
if (readNumBuilder_ == null) {
readNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2754,7 +2754,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota read_size = 6;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getReadSizeFieldBuilder() {
if (readSizeBuilder_ == null) {
readSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -3244,7 +3244,7 @@ public final class QuotaProtos {
public final boolean isInitialized() {
if (hasTimedQuota()) {
if (!getTimedQuota().isInitialized()) {
-
+
return false;
}
}
@@ -3410,7 +3410,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.TimedQuota timed_quota = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
getTimedQuotaFieldBuilder() {
if (timedQuotaBuilder_ == null) {
timedQuotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -3978,7 +3978,7 @@ public final class QuotaProtos {
public final boolean isInitialized() {
if (hasThrottle()) {
if (!getThrottle().isInitialized()) {
-
+
return false;
}
}
@@ -4141,7 +4141,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.Throttle throttle = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder>
getThrottleFieldBuilder() {
if (throttleBuilder_ == null) {
throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -4258,7 +4258,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.SpaceQuota space = 3;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
getSpaceFieldBuilder() {
if (spaceBuilder_ == null) {
spaceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -5876,7 +5876,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
getQuotaFieldBuilder() {
if (quotaBuilder_ == null) {
quotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -7088,7 +7088,7 @@ public final class QuotaProtos {
* <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>
getQuotaStatusFieldBuilder() {
if (quotaStatusBuilder_ == null) {
quotaStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 67fda75..0ded810 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -53,6 +53,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -576,6 +578,25 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
+ public Clock getClock(ClockType clockType) {
+ return super.getClock(clockType);
+ }
+
+ @Override
+ public long updateClock(long timestamp) {
+ return super.updateClock(timestamp);
+ }
+
+ /**
+ * Sets the clock instance; only for the purpose of testing.
+ * @param clock the clock to install
+ */
+ @VisibleForTesting
+ public void setClock(Clock clock) {
+ super.setClock(clock);
+ }
+
+ @Override
protected TableDescriptors getFsTableDescriptors() throws IOException {
return super.getFsTableDescriptors();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index c515435..2641b8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -58,6 +60,18 @@ import com.google.protobuf.Service;
@InterfaceAudience.Private
public interface MasterServices extends Server {
/**
+ * @param clockType The clock type
+ * @return Master's instance of {@link Clock}
+ */
+ Clock getClock(ClockType clockType);
+
+ /**
+ * @param timestamp The timestamp
+ * @return The current physical or hybrid time of the clock after it is updated
+ */
+ long updateClock(long timestamp);
+
+ /**
* @return the underlying snapshot manager
*/
SnapshotManager getSnapshotManager();
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 4dff6f4..bafe2ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -771,7 +771,7 @@ public class ServerManager {
" failed because no RPC connection found to this server");
}
OpenRegionRequest request =
- RequestConverter.buildOpenRegionRequest(server, region, favoredNodes, false);
+ RequestConverter.buildOpenRegionRequest(server, region, favoredNodes, false, null);
try {
OpenRegionResponse response = admin.openRegion(null, request);
return ResponseConverter.getRegionOpeningState(response);
@@ -845,7 +845,7 @@ public class ServerManager {
* @return a list of region opening states
*/
public List<RegionOpeningState> sendRegionOpen(ServerName server,
- List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos)
+ List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos, Long masterClockTime)
throws IOException {
AdminService.BlockingInterface admin = getRsAdmin(server);
if (admin == null) {
@@ -854,7 +854,7 @@ public class ServerManager {
}
OpenRegionRequest request =
- RequestConverter.buildOpenRegionRequest(server, regionOpenInfos, false);
+ RequestConverter.buildOpenRegionRequest(server, regionOpenInfos, false, masterClockTime);
try {
OpenRegionResponse response = admin.openRegion(null, request);
return ResponseConverter.getRegionOpeningStateList(response);
@@ -879,7 +879,7 @@ public class ServerManager {
* @throws IOException
*/
public boolean sendRegionClose(ServerName server, HRegionInfo region,
- ServerName dest) throws IOException {
+ ServerName dest, Long masterClockTime) throws IOException {
if (server == null) throw new NullPointerException("Passed server is null");
AdminService.BlockingInterface admin = getRsAdmin(server);
if (admin == null) {
@@ -889,12 +889,12 @@ public class ServerManager {
" failed because no RPC connection found to this server");
}
HBaseRpcController controller = newRpcController();
- return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(), dest);
+ return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(), dest, masterClockTime);
}
public boolean sendRegionClose(ServerName server,
- HRegionInfo region) throws IOException {
- return sendRegionClose(server, region, null);
+ HRegionInfo region, Long masterClockTime) throws IOException {
+ return sendRegionClose(server, region, masterClockTime);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index eda1128..df05405 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -30,9 +30,12 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.Clock;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerListener;
@@ -266,6 +269,16 @@ public class RSProcedureDispatcher
try {
final ExecuteProceduresResponse response = sendRequest(getServerName(), request.build());
+ for (OpenRegionResponse orr : response.getOpenRegionList()) {
+ if (orr.hasNodeTime()) {
+ env.getMasterServices().updateClock(orr.getNodeTime().getTime());
+ }
+ }
+ for (CloseRegionResponse crr : response.getCloseRegionList()) {
+ if (crr.hasNodeTime()) {
+ env.getMasterServices().updateClock(crr.getNodeTime().getTime());
+ }
+ }
remoteCallCompleted(env, response);
} catch (IOException e) {
e = unwrapException(e);
@@ -286,7 +299,7 @@ public class RSProcedureDispatcher
public void dispatchCloseRequests(final MasterProcedureEnv env,
final List<RegionCloseOperation> operations) {
for (RegionCloseOperation op: operations) {
- request.addCloseRegion(op.buildCloseRegionRequest(getServerName()));
+ request.addCloseRegion(op.buildCloseRegionRequest(env, getServerName()));
}
}
@@ -325,6 +338,14 @@ public class RSProcedureDispatcher
final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
builder.setServerStartCode(serverName.getStartcode());
builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
+
+ // Set master clock time for send event
+ // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+ // but in the future we intend to sync both HLC and system monotonic clocks
+ Clock clock = env.getMasterServices()
+ .getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
+ builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(clock.now()));
+
for (RegionOpenOperation op: operations) {
builder.addOpenInfo(op.buildRegionOpenInfoRequest(env));
}
@@ -347,6 +368,10 @@ public class RSProcedureDispatcher
try {
OpenRegionResponse response = sendRequest(getServerName(), request);
+ if (response.hasNodeTime()) {
+ // Update master clock upon receiving open region response from region server
+ env.getMasterServices().updateClock(response.getNodeTime().getTime());
+ }
remoteCallCompleted(env, response);
} catch (IOException e) {
e = unwrapException(e);
@@ -397,9 +422,17 @@ public class RSProcedureDispatcher
@Override
public Void call() {
final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
- final CloseRegionRequest request = operation.buildCloseRegionRequest(getServerName());
+ final CloseRegionRequest request = operation.buildCloseRegionRequest(env, getServerName());
try {
CloseRegionResponse response = sendRequest(getServerName(), request);
+ if (response.hasNodeTime()) {
+ // Update master clock upon receiving close region response from region server
+ // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta
+ // table, but in the future we intend to sync both HLC and system monotonic clocks
+ Clock clock = env.getMasterServices()
+ .getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
+ clock.update(response.getNodeTime().getTime());
+ }
remoteCallCompleted(env, response);
} catch (IOException e) {
e = unwrapException(e);
@@ -536,9 +569,14 @@ public class RSProcedureDispatcher
return closed;
}
- public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) {
+ public CloseRegionRequest buildCloseRegionRequest(final MasterProcedureEnv env,
+ final ServerName serverName) {
+ // Set master clock time for send event
+ // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+ // but in the future we intend to sync both HLC and system monotonic clocks
+ Clock clock = env.getMasterServices().getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
return ProtobufUtil.buildCloseRegionRequest(serverName,
- getRegionInfo().getRegionName(), getDestinationServer());
+ getRegionInfo().getRegionName(), getDestinationServer(), clock.now());
}
}
}
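Taken together, the dispatcher changes above form a clock handshake around each open/close RPC. A condensed sketch of the ordering, a reading of the hunks rather than code from the patch (now/update are this branch's Clock methods):

  long t1 = masterClock.now();  // master send event, stamped into the request
  rsClock.update(t1);           // region server receive event, rsClock >= t1
  long t2 = rsClock.now();      // region server send event, stamped into the response
  masterClock.update(t2);       // master receive event, masterClock >= t2
  // Consequence: the master's subsequent hbase:meta updates for this region
  // cannot be timestamped below the open/close event it just observed.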
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a55be97..2b82e4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ClockType;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -387,7 +386,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
@Override
public Clock getClock() {
if (this.clock == null) {
- return this.getRegionServerServices().getRegionServerClock(
+ return this.getRegionServerServices().getClock(
getTableDescriptor().getClockType());
}
return this.clock;
@@ -798,7 +797,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
? DEFAULT_DURABILITY
: htd.getDurability();
if (rsServices != null) {
- this.clock = rsServices.getRegionServerClock(htd.getClockType());
+ this.clock = rsServices.getClock(htd.getClockType());
this.rsAccounting = this.rsServices.getRegionServerAccounting();
// don't initialize coprocessors if not running within a regionserver
// TODO: revisit if coprocessors should load in other cases
@@ -1012,6 +1011,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
long maxSeqId = -1;
// initialized to -1 so that we pick up MemstoreTS from column families
long maxMemstoreTS = -1;
+ // Largest timestamp found across all stores
+ long maxTimestamp = 0;
if (htableDescriptor.getColumnFamilyCount() != 0) {
// initialize the thread pool for opening stores in parallel.
@@ -1050,8 +1051,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
if (maxStoreMemstoreTS > maxMemstoreTS) {
maxMemstoreTS = maxStoreMemstoreTS;
}
+ maxTimestamp = Math.max(maxTimestamp, store.getMaxTimestamp());
}
allStoresOpened = true;
+ clock.update(maxTimestamp);
if(hasSloppyStores) {
htableDescriptor = TableDescriptorBuilder.newBuilder(htableDescriptor)
.setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class.getName())
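The clock.update(maxTimestamp) call above is what keeps timestamps monotonic across region moves: the new host advances its clock past anything already persisted before serving writes. A small sketch of the invariant, assuming an HLC or system-monotonic clock that lags the store files:

  // maxTimestamp: largest cell timestamp in any store of this region,
  // possibly written by a previous host whose clock ran ahead of ours.
  clock.update(maxTimestamp);  // local clock is now at least maxTimestamp
  long next = clock.now();     // strictly greater than maxTimestamp
  assert next > maxTimestamp;  // new mutations sort after existing cells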
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f623f88..d8d87f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TimestampType;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -336,9 +337,9 @@ public class HRegionServer extends HasThread implements
// Region server contains instances of all three clocks. Regions have a set
// clock type so depending on the clock type needed by a region, the appropriate
// one can be accessed.
- final protected Clock hybridLogicalClock;
- final protected Clock systemMonotonicClock;
- final protected Clock systemClock;
+ protected Clock hybridLogicalClock;
+ protected Clock systemMonotonicClock;
+ protected Clock systemClock;
ConcurrentMap<String, Integer> rowlocks = new ConcurrentHashMap<>();
@@ -2087,14 +2088,46 @@ public class HRegionServer extends HasThread implements
}
@Override
- public Clock getRegionServerClock(ClockType clockType) {
- if (clockType.equals(ClockType.HLC)){
- return this.hybridLogicalClock;
- } else if (clockType.equals(ClockType.SYSTEM_MONOTONIC)) {
- return this.systemMonotonicClock;
- } else {
- return this.systemClock;
+ public Clock getClock(ClockType clockType) {
+ switch (clockType) {
+ case HLC:
+ return this.hybridLogicalClock;
+ case SYSTEM_MONOTONIC:
+ return this.systemMonotonicClock;
+ case SYSTEM:
+ return this.systemClock;
+ default:
+ throw new IllegalArgumentException("Wrong clock type: " + clockType.toString());
+ }
+ }
+
+ /**
+ * Sets the clock instance; only for the purpose of testing.
+ * @param clock the clock to install
+ */
+ @VisibleForTesting
+ public void setClock(Clock clock) {
+ switch (clock.getClockType()) {
+ case HLC:
+ this.hybridLogicalClock = clock;
+ break;
+ case SYSTEM_MONOTONIC:
+ this.systemMonotonicClock = clock;
+ break;
+ case SYSTEM:
+ this.systemClock = clock;
+ break;
+ default:
+ throw new IllegalArgumentException("Wrong clock type: " + clock.getClockType().toString());
+ }
+ }
+
+ @Override
+ public long updateClock(long timestamp) {
+ if (TimestampType.HYBRID.isLikelyOfType(timestamp)) {
+ return this.hybridLogicalClock.update(timestamp);
}
+ return this.systemMonotonicClock.update(timestamp);
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 4b2b460..7245597 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-import org.apache.hadoop.hbase.TimestampType;
import org.apache.hadoop.hbase.Clock;
import com.google.common.annotations.VisibleForTesting;
@@ -481,6 +480,10 @@ public class HStore implements Store {
return StoreUtils.getMaxMemstoreTSInList(this.getStorefiles());
}
+ public long getMaxTimestamp() {
+ return StoreUtils.getMaxTimestampInList(this.getStorefiles());
+ }
+
/**
* @param tabledir {@link Path} to where the table is being stored
* @param hri {@link HRegionInfo} for the region.
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 8bd1b5d..2fd00d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Clock;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseIOException;
@@ -200,6 +201,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanReques
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
@@ -226,8 +228,6 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.zookeeper.KeeperException;
-import com.google.common.annotations.VisibleForTesting;
-
/**
* Implements the regionserver RPC services.
*/
@@ -1514,6 +1514,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion());
+ if (request.hasNodeTime()) {
+ this.regionServer.updateClock(request.getNodeTime().getTime());
+ }
+
requestCount.increment();
if (sn == null) {
LOG.info("Close " + encodedRegionName + " without moving");
@@ -1521,7 +1525,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
LOG.info("Close " + encodedRegionName + ", moving to " + sn);
}
boolean closed = regionServer.closeRegion(encodedRegionName, false, sn);
- CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed);
+ // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+ // but in the future we intend to sync both HLC and system monotonic clocks
+ long regionServerClockTime = this.regionServer
+ .getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE).now();
+
+ CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder()
+ .setClosed(closed)
+ .setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(regionServerClockTime));
return builder.build();
} catch (IOException ie) {
throw new ServiceException(ie);
@@ -1896,6 +1907,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
+ // Update region server clock on receive event
+ if (request.hasNodeTime()) {
+ this.regionServer.updateClock(request.getNodeTime().getTime());
+ }
+
for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
HTableDescriptor htd;
@@ -1947,7 +1963,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
// Check if current region open is for distributedLogReplay. This check is to support
// rolling restart/upgrade where we want to Master/RS see same configuration
if (!regionOpenInfo.hasOpenForDistributedLogReplay()
- || regionOpenInfo.getOpenForDistributedLogReplay()) {
+ || regionOpenInfo.getOpenForDistributedLogReplay()) {
regionServer.recoveringRegions.put(region.getEncodedName(), null);
} else {
// Remove stale recovery region from ZK when we open region not for recovering which
@@ -1978,10 +1994,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
regionServer.service.submit(new OpenPriorityRegionHandler(
- regionServer, regionServer, region, htd, masterSystemTime));
+ regionServer, regionServer, region, htd, masterSystemTime));
} else {
regionServer.service.submit(new OpenRegionHandler(
- regionServer, regionServer, region, htd, masterSystemTime));
+ regionServer, regionServer, region, htd, masterSystemTime));
}
}
}
@@ -2000,6 +2016,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
}
}
+
+ // Set clock for send event
+ // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+ // but in the future we intend to sync both HLC and system monotonic clocks
+ Clock clock = this.regionServer.getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
+ builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(clock.now()));
+
return builder.build();
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 5c37136..9b17e47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -60,7 +60,17 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
* default (common) WAL */
WAL getWAL(HRegionInfo regionInfo) throws IOException;
- Clock getRegionServerClock(ClockType clockType);
+ /**
+ * @param clockType The clock type
+ * @return Region server's instance of {@link Clock}
+ */
+ Clock getClock(ClockType clockType);
+
+ /**
+ * @param timestamp The timestamp
+ * @return The current physical or hybrid time of the clock after it is updated
+ */
+ long updateClock(long timestamp);
/** @return the List of WALs that are used by this server
* Doesn't include the meta WAL
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 5623adb..a1903b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -114,6 +114,14 @@ public class StoreUtils {
return max;
}
+ public static long getMaxTimestampInList(Collection<StoreFile> sfs) {
+ long max = 0;
+ for (StoreFile sf : sfs) {
+ max = Math.max(max, sf.getMaximumTimestamp().orElse(Long.MIN_VALUE));
+ }
+ return max;
+ }
+
/**
* Gets the approximate mid-point of the given file that is optimal for use in splitting it.
* @param file the store file
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 9f568f2..0640457 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -253,11 +253,19 @@ public class MockRegionServerServices implements RegionServerServices {
}
@Override
- public Clock getRegionServerClock(ClockType clockType) {
+ public Clock getClock(ClockType clockType) {
return Clock.getDummyClockOfGivenClockType(clockType);
}
@Override
+ public long updateClock(long timestamp) {
+ if (TimestampType.HYBRID.isLikelyOfType(timestamp)) {
+ return new Clock.HLC().update(timestamp);
+ }
+ return new Clock.SystemMonotonic().update(timestamp);
+ }
+
+ @Override
public ExecutorService getExecutorService() {
return null;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
index 572a128..bc95f46 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
@@ -19,8 +19,13 @@
package org.apache.hadoop.hbase;
import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -28,37 +33,81 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.TimestampType;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.junit.After;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
+import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-
+import org.junit.rules.TestName;
@Category({MediumTests.class})
public class TestClockWithCluster {
private static final Log LOG = LogFactory.getLog(TestClockWithCluster.class);
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ @Rule
+ public TestName name = new TestName();
+ private static final HBaseTestingUtility HBTU = new HBaseTestingUtility();
private static Connection connection;
- private byte[] columnFamily = Bytes.toBytes("testCF");
+
+ private Admin admin;
+ private TableName tableName;
+ private Table table;
+
+ // Test names
+ private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");
+
@BeforeClass
public static void setupClass() throws Exception {
- UTIL.startMiniCluster(1);
- connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
+ final int NUM_MASTERS = 1;
+ final int NUM_RS = 1;
+ HBTU.startMiniCluster(NUM_MASTERS, NUM_RS);
+ connection = ConnectionFactory.createConnection(HBTU.getConfiguration());
}
@AfterClass
public static void tearDownClass() throws Exception {
connection.close();
- UTIL.shutdownMiniCluster();
+ HBTU.shutdownMiniCluster();
+ }
+
+ @Before
+ public void setup() throws Exception {
+ admin = connection.getAdmin();
+ tableName = TableName.valueOf(name.getMethodName());
+ admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
+ .addColumnFamily(new HColumnDescriptor(TEST_FAMILY))
+ .build());
+ table = connection.getTable(tableName);
+ }
+
+ @After
+ public void teardown() throws Exception {
+ try {
+ if (table != null) {
+ table.close();
+ }
+ } finally {
+ try {
+ HBTU.deleteTable(tableName);
+ } catch (IOException ioe) {
+ LOG.error("Failed deleting table '" + tableName + "' during teardown. Exception:" + ioe);
+ }
+ }
}
private void verifyTimestamps(Table table, final byte[] f, int startRow, int endRow,
@@ -75,29 +124,21 @@ public class TestClockWithCluster {
@Test
public void testNewTablesAreCreatedWithSystemClock() throws IOException {
- Admin admin = connection.getAdmin();
- TableName tableName = TableName.valueOf("TestNewTablesAreSystemByDefault");
- admin.createTable(new HTableDescriptor(tableName).addFamily(new
- HColumnDescriptor(columnFamily)));
-
- Table table = connection.getTable(tableName);
-
ClockType clockType = admin.getTableDescriptor(tableName).getClockType();
assertEquals(ClockType.SYSTEM, clockType);
// write
- UTIL.loadNumericRows(table, columnFamily, 0, 1000);
+ HBTU.loadNumericRows(table, TEST_FAMILY, 0, 1000);
// read, check that it is the same.
- UTIL.verifyNumericRows(table, Bytes.toBytes("testCF"), 0, 1000, 0);
+ HBTU.verifyNumericRows(table, TEST_FAMILY, 0, 1000, 0);
// This check will be useful if the clock type were system monotonic or HLC.
- verifyTimestamps(table, columnFamily, 0, 1000, TimestampType.PHYSICAL);
+ verifyTimestamps(table, TEST_FAMILY, 0, 1000, TimestampType.PHYSICAL);
}
@Test
public void testMetaTableClockTypeIsHLC() throws IOException {
- Admin admin = connection.getAdmin();
- Table table = connection.getTable(TableName.META_TABLE_NAME);
- ClockType clockType = admin.getTableDescriptor(TableName.META_TABLE_NAME).getClockType();
+ ClockType clockType = admin
+ .getTableDescriptor(TableName.META_TABLE_NAME).getClockType();
assertEquals(ClockType.HLC, clockType);
}
@@ -112,4 +153,214 @@ public class TestClockWithCluster {
assertTrue(TimestampType.HYBRID.isLikelyOfType(cell.getTimestamp()));
}
}
+
+ private long getColumnLatestCellTimestamp(HRegionInfo hri) throws IOException {
+ Result result = MetaTableAccessor.getRegionResult(connection, hri.getRegionName());
+ Cell cell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER);
+ return cell.getTimestamp();
+ }
+
+ private void assertHLCTime(Clock.HLC clock, long expectedPhysicalTime, long expectedLogicalTime) {
+ assertEquals(expectedPhysicalTime, clock.getPhysicalTime());
+ assertEquals(expectedLogicalTime, clock.getLogicalTime());
+ }
+
+ @Test
+ public void testRegionStateTransitionTimestampsIncreaseMonotonically() throws Exception {
+ HRegionServer rs = HBTU.getRSForFirstRegionInTable(tableName);
+ List<Region> regions = rs.getOnlineRegions();
+
+ assert(!regions.isEmpty());
+
+ MiniHBaseCluster cluster = HBTU.getHBaseCluster();
+
+ assertTrue(cluster.waitForActiveAndReadyMaster());
+ HMaster master = cluster.getMaster();
+ assertTrue(master.isActiveMaster());
+ assertTrue(master.isInitialized());
+
+ RegionStates regionStates = master.getAssignmentManager().getRegionStates();
+
+ assertEquals(3, cluster.countServedRegions());
+ HRegionInfo hriOnline;
+ try (RegionLocator locator =
+ HBTU.getConnection().getRegionLocator(tableName)) {
+ hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+ }
+
+ HRegion regionMeta = null;
+ for (Region r : master.getOnlineRegions()) {
+ if (r.getRegionInfo().isMetaRegion()) {
+ regionMeta = ((HRegion) r);
+ }
+ }
+
+ assertNotNull(regionMeta);
+
+ // Inject physical clock that always returns same physical time into hybrid logical clock
+ long systemTime = Clock.DEFAULT_JAVA_MILLIS_PHYSICAL_CLOCK.now();
+ Clock.PhysicalClock physicalClock = mock(Clock.PhysicalClock.class);
+ when(physicalClock.now()).thenReturn(systemTime);
+ when(physicalClock.getTimeUnit()).thenReturn(TimeUnit.MILLISECONDS);
+ Clock.HLC clock = new Clock.HLC(physicalClock, Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+
+ // The region clock is used for setting timestamps for table mutations and the region server
+ // clock is used for updating the clock on region assign/unassign events.
+
+ // Set meta region clock so that region state transitions are timestamped with mocked clock
+ regionMeta.setClock(clock);
+ master.setClock(clock);
+
+ HRegion userRegion = null;
+ for (Region region : regions) {
+ if (region.getRegionInfo().getTable().equals(tableName)) {
+ userRegion = (HRegion) region;
+ }
+ }
+ assertNotNull(userRegion);
+
+ // Only mock the region server clock because the region clock does not get used during
+ // unassignment and assignment
+ rs.setClock(clock);
+
+ // Repeatedly unassign and assign region while tracking the timestamps of the region state
+ // transitions from the meta table
+ List<Long> timestamps = new ArrayList<>();
+ // Set expected logical time to 0 as initial clock.now() sets clock's logical time to 0
+ long expectedLogicalTime = TimestampType.HYBRID.getLogicalTime(clock.now());
+ for (int i = 0; i < 10; i++) {
+ admin.unassign(hriOnline.getRegionName(), false);
+ assertEquals(RegionState.State.CLOSED, regionStates.getRegionState(hriOnline).getState());
+ // clock.now() is called 8 times and clock.update() is called 2 times, each call increments
+ // the logical time by one.
+ // 0 [now] Get region info from hbase:meta in HBaseAdmin#unassign
+ // 1 [now] Get region info from hbase:meta in MasterRpcServices#unassignRegion
+ // 2,3 [now] Update hbase:meta
+ // 4 [now] Send unassign region request to region server
+ // 5 [update] Update region server clock upon receiving unassign region request
+ // 6 [now] Send region server response back to master
+ // 7 [update] Update master clock upon close region response from region server
+ // 8,9 [now] Update hbase:meta
+ expectedLogicalTime += 10;
+
+ assertEquals(expectedLogicalTime, clock.getLogicalTime());
+ timestamps.add(clock.getLogicalTime());
+
+ admin.assign(hriOnline.getRegionName());
+ // clock.now() is called 7 times and clock.update() is called 2 times, each call increments
+ // the logical time by one.
+ // 0 [now] Get region info from hbase:meta in HBaseAdmin#assign
+ // 1,2 [now] Update hbase:meta
+ // 3 [now] Send assign region request to region server
+ // 4 [update] Update region server clock upon receiving assign region request
+ // 5 [now] Send region server response back to master
+ // 6 [update] Update master clock upon open region response from region server
+ // 7,8 [now] Update hbase:meta
+ // Assignment has one less call to clock.now() because MasterRpcServices#assignRegion instead
+ // gets the region info from assignment manager rather than meta table accessor
+ expectedLogicalTime += 9;
+ assertEquals(RegionState.State.OPEN, regionStates.getRegionState(hriOnline).getState());
+ assertEquals(expectedLogicalTime, clock.getLogicalTime());
+ timestamps.add(clock.getLogicalTime());
+ }
+
+ // Ensure that the hybrid timestamps are strictly increasing
+ for (int i = 0; i < timestamps.size() - 1; i++) {
+ if (timestamps.get(i) >= timestamps.get(i + 1)) {
+ Assert.fail("Current ts is " + timestamps.get(i)
+ + ", but the next ts is equal or smaller " + timestamps.get(i + 1));
+ }
+ }
+ }
+
+ @Test
+ public void testRegionOpenAndCloseClockUpdates() throws Exception {
+ HRegionServer rs = HBTU.getRSForFirstRegionInTable(tableName);
+ List<Region> regions = rs.getOnlineRegions();
+
+ assert(!regions.isEmpty());
+
+ MiniHBaseCluster cluster = HBTU.getHBaseCluster();
+
+ assertTrue(cluster.waitForActiveAndReadyMaster());
+ HMaster master = cluster.getMaster();
+ assertTrue(master.isActiveMaster());
+ assertTrue(master.isInitialized());
+
+ RegionStates regionStates = master.getAssignmentManager().getRegionStates();
+
+ HRegionInfo hriOnline;
+ try (RegionLocator locator =
+ HBTU.getConnection().getRegionLocator(tableName)) {
+ hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+ }
+
+ HRegion regionMeta = null;
+ for (Region r : master.getOnlineRegions()) {
+ if (r.getRegionInfo().isMetaRegion()) {
+ regionMeta = ((HRegion) r);
+ }
+ }
+
+ assertNotNull(regionMeta);
+
+ // Instantiate two hybrid logical clocks with mocked physical clocks
+ long expectedPhysicalTime = Clock.DEFAULT_JAVA_MILLIS_PHYSICAL_CLOCK.now();
+ Clock.PhysicalClock masterPhysicalClock = mock(Clock.PhysicalClock.class);
+ when(masterPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+ when(masterPhysicalClock.getTimeUnit()).thenReturn(TimeUnit.MILLISECONDS);
+ Clock.HLC masterClock = new Clock.HLC(masterPhysicalClock, Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+ master.setClock(masterClock);
+ regionMeta.setClock(masterClock);
+
+ Clock.PhysicalClock rsPhysicalClock = mock(Clock.PhysicalClock.class);
+ when(rsPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+ when(rsPhysicalClock.getTimeUnit()).thenReturn(TimeUnit.MILLISECONDS);
+ Clock.HLC rsClock = new Clock.HLC(rsPhysicalClock, Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+ // We only mock the region server clock here because the region clock does not get used
+ // during unassignment and assignment
+ rs.setClock(rsClock);
+
+ // Increment master physical clock time
+ expectedPhysicalTime += 1000;
+ when(masterPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+
+ // Unassign region, region server should advance its clock upon receiving close region request
+ admin.unassign(hriOnline.getRegionName(), false);
+ assertEquals(RegionState.State.CLOSED, regionStates.getRegionState(hriOnline).getState());
+ // Verify that region server clock time increased
+ // Previous test has explanation for each event that increases logical time
+ assertHLCTime(masterClock, expectedPhysicalTime, 9);
+ assertHLCTime(rsClock, expectedPhysicalTime, 6);
+
+ // Increase region server physical clock time
+ expectedPhysicalTime += 1000;
+ when(rsPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+ // Assign region, master server should advance its clock upon receiving open region response
+ admin.assign(hriOnline.getRegionName());
+ assertEquals(RegionState.State.OPEN, regionStates.getRegionState(hriOnline).getState());
+ // Verify that master clock time increased
+ assertHLCTime(masterClock, expectedPhysicalTime, 4);
+ assertHLCTime(rsClock, expectedPhysicalTime, 1);
+
+ // Increment region server physical clock time
+ expectedPhysicalTime += 1000;
+ when(rsPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+ // Unassign region, region server should advance its clock upon receiving close region request
+ admin.unassign(hriOnline.getRegionName(), false);
+ assertEquals(RegionState.State.CLOSED, regionStates.getRegionState(hriOnline).getState());
+ // Verify that master server clock time increased
+ assertHLCTime(masterClock, expectedPhysicalTime, 4);
+ assertHLCTime(rsClock, expectedPhysicalTime, 1);
+
+ // Increase master server physical clock time
+ expectedPhysicalTime += 1000;
+ when(masterPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+ // Assign region, master server should advance its clock upon receiving open region response
+ admin.assign(hriOnline.getRegionName());
+ assertEquals(RegionState.State.OPEN, regionStates.getRegionState(hriOnline).getState());
+ // Verify that region server clock time increased
+ assertHLCTime(masterClock, expectedPhysicalTime, 8);
+ assertHLCTime(rsClock, expectedPhysicalTime, 5);
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 7b4442b..fd51d0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -238,7 +238,7 @@ public class TestReplicasClient {
} catch (Exception e){}
// first version is '0'
AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(
- getRS().getServerName(), hri, null, null);
+ getRS().getServerName(), hri, null, null, null);
AdminProtos.OpenRegionResponse responseOpen = getRS().getRSRpcServices().openRegion(null, orr);
Assert.assertEquals(responseOpen.getOpeningStateCount(), 1);
Assert.assertEquals(responseOpen.getOpeningState(0),
@@ -248,7 +248,7 @@ public class TestReplicasClient {
private void closeRegion(HRegionInfo hri) throws Exception {
AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest(
- getRS().getServerName(), hri.getEncodedName());
+ getRS().getServerName(), hri.getEncodedName(), null);
AdminProtos.CloseRegionResponse responseClose = getRS()
.getRSRpcServices().closeRegion(null, crr);
Assert.assertTrue(responseClose.getClosed());
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
index c5a9efc..82e8510 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
@@ -110,6 +110,8 @@ public class TestIncrementTimeRange {
// test that depends on an environment edge that is manually moved forward.
util.getConfiguration().setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 0);
util.startMiniCluster();
+ // Ensure that current system time is set when clock updates during region open
+ mee.setValue(EnvironmentEdgeManager.currentTime());
EnvironmentEdgeManager.injectEdge(mee);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 8f878b8..cfae7d0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -22,6 +22,8 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -86,6 +88,12 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
+ public Clock getClock(ClockType clockType) { return null; }
+
+ @Override
+ public long updateClock(long timestamp) { return 0; }
+
+ @Override
public AssignmentManager getAssignmentManager() {
return null;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 089cf69..3edd445 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TimestampType;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Result;
@@ -581,11 +582,19 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
}
@Override
- public Clock getRegionServerClock(ClockType clockType) {
+ public Clock getClock(ClockType clockType) {
return new Clock.System();
}
@Override
+ public long updateClock(long timestamp) {
+ if (TimestampType.HYBRID.isLikelyOfType(timestamp)) {
+ return new Clock.HLC().update(timestamp);
+ }
+ return new Clock.SystemMonotonic().update(timestamp);
+ }
+
+ @Override
public ExecutorService getExecutorService() {
return null;
}