You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by sy...@apache.org on 2016/07/23 23:03:45 UTC
[1/3] hbase git commit: HBASE-16008 A robust way to deal with early
termination of HBCK (Stephen Yuan Jiang)
Repository: hbase
Updated Branches:
refs/heads/master cd0b85e0a -> bdd7782f0
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 2bf36b4..f67b7c7 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -258,6 +258,13 @@ message StopMasterRequest {
message StopMasterResponse {
}
+message IsInMaintenanceModeRequest {
+}
+
+message IsInMaintenanceModeResponse {
+ required bool inMaintenanceMode = 1;
+}
+
message BalanceRequest {
optional bool force = 1;
}
@@ -641,6 +648,12 @@ service MasterService {
returns(StopMasterResponse);
/**
+ * Query whether the Master is in maintenance mode.
+ */
+ rpc IsMasterInMaintenanceMode(IsInMaintenanceModeRequest)
+ returns(IsInMaintenanceModeResponse);
+
+ /**
* Run the balancer. Will run the balancer and if regions to move, it will
* go ahead and do the reassignments. Can NOT run for various reasons.
* Check logs.
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index c93b307..476c796 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -109,6 +109,7 @@ public class CatalogJanitor extends ScheduledChore {
try {
AssignmentManager am = this.services.getAssignmentManager();
if (this.enabled.get()
+ && !this.services.isInMaintenanceMode()
&& am != null
&& am.isFailoverCleanupDone()
&& am.getRegionStates().getRegionsInTransition().size() == 0) {
@@ -241,6 +242,11 @@ public class CatalogJanitor extends ScheduledChore {
int mergeCleaned = 0;
Map<HRegionInfo, Result> mergedRegions = scanTriple.getSecond();
for (Map.Entry<HRegionInfo, Result> e : mergedRegions.entrySet()) {
+ if (this.services.isInMaintenanceMode()) {
+ // Stop cleaning if the master is in maintenance mode
+ break;
+ }
+
PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(e.getValue());
HRegionInfo regionA = p.getFirst();
HRegionInfo regionB = p.getSecond();
@@ -266,6 +272,11 @@ public class CatalogJanitor extends ScheduledChore {
// regions whose parents are still around
HashSet<String> parentNotCleaned = new HashSet<String>();
for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
+ if (this.services.isInMaintenanceMode()) {
+ // Stop cleaning if the master is in maintenance mode
+ break;
+ }
+
if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
cleanParent(e.getKey(), e.getValue())) {
splitCleaned++;
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9196368..5f5cc38 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -159,6 +159,8 @@ import org.apache.hadoop.hbase.util.ZKDataMigrator;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
@@ -261,6 +263,9 @@ public class HMaster extends HRegionServer implements MasterServices {
// Tracker for region normalizer state
private RegionNormalizerTracker regionNormalizerTracker;
+ // Tracker for master maintenance mode setting
+ private MasterMaintenanceModeTracker maintenanceModeTracker;
+
private ClusterSchemaService clusterSchemaService;
// Metrics for the HMaster
@@ -616,6 +621,9 @@ public class HMaster extends HRegionServer implements MasterServices {
this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
this.drainingServerTracker.start();
+ this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);
+ this.maintenanceModeTracker.start();
+
// Set the cluster as up. If new RSs, they'll be waiting on this before
// going ahead with their startup.
boolean wasUp = this.clusterStatusTracker.isClusterUp();
@@ -1125,6 +1133,12 @@ public class HMaster extends HRegionServer implements MasterServices {
LOG.debug("Master has not been initialized, don't run balancer.");
return false;
}
+
+ if (isInMaintenanceMode()) {
+ LOG.info("Master is in maintenanceMode mode, don't run balancer.");
+ return false;
+ }
+
// Do this call outside of synchronized block.
int maximumBalanceTime = getBalancerCutoffTime();
synchronized (this.balancer) {
@@ -1228,6 +1242,11 @@ public class HMaster extends HRegionServer implements MasterServices {
return false;
}
+ if (isInMaintenanceMode()) {
+ LOG.info("Master is in maintenance mode, don't run region normalizer.");
+ return false;
+ }
+
if (!this.regionNormalizerTracker.isNormalizerOn()) {
LOG.debug("Region normalization is disabled, don't run region normalizer.");
return false;
@@ -1241,6 +1260,11 @@ public class HMaster extends HRegionServer implements MasterServices {
Collections.shuffle(allEnabledTables);
for (TableName table : allEnabledTables) {
+ if (isInMaintenanceMode()) {
+ LOG.debug("Master is in maintenance mode, stop running region normalizer.");
+ return false;
+ }
+
HTableDescriptor tblDesc = getTableDescriptors().get(table);
if (table.isSystemTable() || (tblDesc != null &&
!tblDesc.isNormalizationEnabled())) {
@@ -2264,6 +2288,16 @@ public class HMaster extends HRegionServer implements MasterServices {
return initialized.isReady();
}
+ /**
+ * Report whether this master is in maintenance mode.
+ *
+ * @return true if master is in maintenance mode
+ */
+ @Override
+ public boolean isInMaintenanceMode() {
+ return maintenanceModeTracker.isInMaintenanceMode();
+ }
+
@VisibleForTesting
public void setInitialized(boolean isInitialized) {
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
@@ -2735,7 +2769,9 @@ public class HMaster extends HRegionServer implements MasterServices {
* @return The state of the load balancer, or false if the load balancer isn't defined.
*/
public boolean isBalancerOn() {
- if (null == loadBalancerTracker) return false;
+ if (null == loadBalancerTracker || isInMaintenanceMode()) {
+ return false;
+ }
return loadBalancerTracker.isBalancerOn();
}
@@ -2744,10 +2780,10 @@ public class HMaster extends HRegionServer implements MasterServices {
* false is returned.
*/
public boolean isNormalizerOn() {
- return null == regionNormalizerTracker? false: regionNormalizerTracker.isNormalizerOn();
+ return (null == regionNormalizerTracker || isInMaintenanceMode()) ?
+ false: regionNormalizerTracker.isNormalizerOn();
}
-
/**
* Queries the state of the {@link SplitOrMergeTracker}. If it is not initialized,
* false is returned. If switchType is illegal, false will return.
@@ -2755,7 +2791,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* @return The state of the switch
*/
public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
- if (null == splitOrMergeTracker) {
+ if (null == splitOrMergeTracker || isInMaintenanceMode()) {
return false;
}
return splitOrMergeTracker.isSplitOrMergeEnabled(switchType);
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 90af7c7..8974945 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1246,6 +1246,15 @@ public class MasterRpcServices extends RSRpcServices
}
@Override
+ public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+ final RpcController controller,
+ final IsInMaintenanceModeRequest request) throws ServiceException {
+ IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
+ response.setInMaintenanceMode(master.isInMaintenanceMode());
+ return response.build();
+ }
+
+ @Override
public UnassignRegionResponse unassignRegion(RpcController controller,
UnassignRegionRequest req) throws ServiceException {
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 9b91572..cfb2023 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -310,6 +310,11 @@ public interface MasterServices extends Server {
boolean isInitialized();
/**
+ * @return true if master is in maintenance mode
+ */
+ boolean isInMaintenanceMode();
+
+ /**
* Abort a procedure.
* @param procId ID of the procedure
* @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index c05973b..d483c71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -104,7 +104,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
@@ -209,6 +208,9 @@ public class HBaseFsck extends Configured implements Closeable {
// AlreadyBeingCreatedException which is implies timeout on this operations up to
// HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).
private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds
+ private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;
+ private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds
+ private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds
/**********************
* Internal resources
@@ -236,8 +238,6 @@ public class HBaseFsck extends Configured implements Closeable {
private static boolean details = false; // do we display the full report
private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older
private static boolean forceExclusive = false; // only this hbck can modify HBase
- private static boolean disableBalancer = false; // disable load balancer to keep regions stable
- private static boolean disableSplitAndMerge = false; // disable split and merge
private boolean fixAssignments = false; // fix assignment errors?
private boolean fixMeta = false; // fix meta errors?
private boolean checkHdfs = true; // load and check fs consistency?
@@ -306,10 +306,13 @@ public class HBaseFsck extends Configured implements Closeable {
private Map<TableName, TableState> tableStates =
new HashMap<TableName, TableState>();
private final RetryCounterFactory lockFileRetryCounterFactory;
+ private final RetryCounterFactory createZNodeRetryCounterFactory;
private Map<TableName, Set<String>> skippedRegions = new HashMap<TableName, Set<String>>();
- ZooKeeperWatcher zkw = null;
+ private ZooKeeperWatcher zkw = null;
+ private String hbckEphemeralNodePath = null;
+ private boolean hbckZodeCreated = false;
/**
* Constructor
@@ -349,6 +352,14 @@ public class HBaseFsck extends Configured implements Closeable {
"hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),
getConf().getInt(
"hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));
+ createZNodeRetryCounterFactory = new RetryCounterFactory(
+ getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),
+ getConf().getInt(
+ "hbase.hbck.createznode.attempt.sleep.interval",
+ DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),
+ getConf().getInt(
+ "hbase.hbck.createznode.attempt.maxsleeptime",
+ DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));
zkw = createZooKeeperWatcher();
}
@@ -498,6 +509,7 @@ public class HBaseFsck extends Configured implements Closeable {
@Override
public void run() {
IOUtils.closeQuietly(HBaseFsck.this);
+ cleanupHbckZnode();
unlockHbck();
}
});
@@ -677,48 +689,77 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
+ * This method maintains an ephemeral znode. If the creation fails we return false or throw
+ * exception
+ *
+ * @return true if creating znode succeeds; false otherwise
+ * @throws IOException if IO failure occurs
+ */
+ private boolean setMasterInMaintenanceMode() throws IOException {
+ RetryCounter retryCounter = createZNodeRetryCounterFactory.create();
+ hbckEphemeralNodePath = ZKUtil.joinZNode(
+ ZooKeeperWatcher.masterMaintZNode,
+ "hbck-" + Long.toString(EnvironmentEdgeManager.currentTime()));
+ do {
+ try {
+ hbckZodeCreated = ZKUtil.createEphemeralNodeAndWatch(zkw, hbckEphemeralNodePath, null);
+ if (hbckZodeCreated) {
+ break;
+ }
+ } catch (KeeperException e) {
+ if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) {
+ throw new IOException("Can't create znode " + hbckEphemeralNodePath, e);
+ }
+ // fall through and retry
+ }
+
+ LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" +
+ (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts());
+
+ try {
+ retryCounter.sleepUntilNextRetry();
+ } catch (InterruptedException ie) {
+ throw (InterruptedIOException) new InterruptedIOException(
+ "Can't create znode " + hbckEphemeralNodePath).initCause(ie);
+ }
+ } while (retryCounter.shouldRetry());
+ return hbckZodeCreated;
+ }
+
+ private void cleanupHbckZnode() {
+ try {
+ if (zkw != null && hbckZodeCreated) {
+ ZKUtil.deleteNode(zkw, hbckEphemeralNodePath);
+ hbckZodeCreated = false;
+ }
+ } catch (KeeperException e) {
+ // Ignore
+ if (!e.code().equals(KeeperException.Code.NONODE)) {
+ LOG.warn("Delete HBCK znode " + hbckEphemeralNodePath + " failed ", e);
+ }
+ }
+ }
+
+ /**
* Contacts the master and prints out cluster-wide information
* @return 0 on success, non-zero on failure
*/
- public int onlineHbck() throws IOException, KeeperException, InterruptedException, ServiceException {
+ public int onlineHbck()
+ throws IOException, KeeperException, InterruptedException, ServiceException {
// print hbase server version
errors.print("Version: " + status.getHBaseVersion());
offlineHdfsIntegrityRepair();
- boolean oldBalancer = false;
- if (shouldDisableBalancer()) {
- oldBalancer = admin.setBalancerRunning(false, true);
- }
- boolean[] oldSplitAndMerge = null;
- if (shouldDisableSplitAndMerge()) {
- oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
- MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
+ // If Master runs maintenance tasks (such as balancer, catalog janitor, etc) during online
+ // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it
+ // is better to set Master into maintenance mode during online hbck.
+ //
+ if (!setMasterInMaintenanceMode()) {
+ LOG.warn("HBCK is running while master is not in maintenance mode, you might see transient "
+ + "error. Please run HBCK multiple times to reduce the chance of transient error.");
}
- try {
- onlineConsistencyRepair();
- }
- finally {
- // Only restore the balancer if it was true when we started repairing and
- // we actually disabled it. Otherwise, we might clobber another run of
- // hbck that has just restored it.
- if (shouldDisableBalancer() && oldBalancer) {
- admin.setBalancerRunning(oldBalancer, false);
- }
-
- if (shouldDisableSplitAndMerge()) {
- if (oldSplitAndMerge != null) {
- if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
- admin.setSplitOrMergeEnabled(true, false,
- MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
- } else if (oldSplitAndMerge[0]) {
- admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
- } else if (oldSplitAndMerge[1]) {
- admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
- }
- }
- }
- }
+ onlineConsistencyRepair();
if (checkRegionBoundaries) {
checkRegionBoundaries();
@@ -730,6 +771,9 @@ public class HBaseFsck extends Configured implements Closeable {
checkAndFixReplication();
+ // Remove the hbck znode
+ cleanupHbckZnode();
+
// Remove the hbck lock
unlockHbck();
@@ -750,6 +794,7 @@ public class HBaseFsck extends Configured implements Closeable {
@Override
public void close() throws IOException {
try {
+ cleanupHbckZnode();
unlockHbck();
} catch (Exception io) {
LOG.warn(io);
@@ -4222,38 +4267,6 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
- * Disable the load balancer.
- */
- public static void setDisableBalancer() {
- disableBalancer = true;
- }
-
- /**
- * Disable the split and merge
- */
- public static void setDisableSplitAndMerge() {
- disableSplitAndMerge = true;
- }
-
- /**
- * The balancer should be disabled if we are modifying HBase.
- * It can be disabled if you want to prevent region movement from causing
- * false positives.
- */
- public boolean shouldDisableBalancer() {
- return fixAny || disableBalancer;
- }
-
- /**
- * The split and merge should be disabled if we are modifying HBase.
- * It can be disabled if you want to prevent region movement from causing
- * false positives.
- */
- public boolean shouldDisableSplitAndMerge() {
- return fixAny || disableSplitAndMerge;
- }
-
- /**
* Set summary mode.
* Print only summary of the tables and status (OK or INCONSISTENT)
*/
@@ -4514,7 +4527,6 @@ public class HBaseFsck extends Configured implements Closeable {
out.println(" -sidelineDir <hdfs://> HDFS path to backup existing meta.");
out.println(" -boundaries Verify that regions boundaries are the same between META and store files.");
out.println(" -exclusive Abort if another hbck is exclusive or fixing.");
- out.println(" -disableBalancer Disable the load balancer.");
out.println("");
out.println(" Metadata Repair options: (expert features, use with caution!)");
@@ -4610,10 +4622,6 @@ public class HBaseFsck extends Configured implements Closeable {
setDisplayFullReport();
} else if (cmd.equals("-exclusive")) {
setForceExclusive();
- } else if (cmd.equals("-disableBalancer")) {
- setDisableBalancer();
- } else if (cmd.equals("-disableSplitAndMerge")) {
- setDisableSplitAndMerge();
} else if (cmd.equals("-timelag")) {
if (i == args.length - 1) {
errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
new file mode 100644
index 0000000..fc0e05f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
@@ -0,0 +1,81 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Tracks the master Maintenance Mode via ZK.
+ */
+@InterfaceAudience.Private
+public class MasterMaintenanceModeTracker extends ZooKeeperListener {
+ private boolean hasChildren;
+
+ public MasterMaintenanceModeTracker(ZooKeeperWatcher watcher) {
+ super(watcher);
+ hasChildren = false;
+ }
+
+ public boolean isInMaintenanceMode() {
+ return hasChildren;
+ }
+
+ private void update(String path) {
+ if (path.startsWith(ZooKeeperWatcher.masterMaintZNode)) {
+ update();
+ }
+ }
+
+ private void update() {
+ try {
+ List<String> children =
+ ZKUtil.listChildrenAndWatchForNewChildren(watcher, ZooKeeperWatcher.masterMaintZNode);
+ hasChildren = (children != null && children.size() > 0);
+ } catch (KeeperException e) {
+ // Ignore the ZK keeper exception
+ hasChildren = false;
+ }
+ }
+
+ /**
+ * Starts the tracking of whether master is in Maintenance Mode.
+ */
+ public void start() {
+ watcher.registerListener(this);
+ update();
+ }
+
+ @Override
+ public void nodeCreated(String path) {
+ update(path);
+ }
+
+ @Override
+ public void nodeDeleted(String path) {
+ update(path);
+ }
+
+ @Override
+ public void nodeChildrenChanged(String path) {
+ update(path);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index 2dad987..83ab350 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -146,6 +146,4 @@ public class SplitOrMergeTracker {
return builder.build();
}
}
-
-
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index c7a42d9..0a86ecb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -307,6 +307,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
+ public boolean isInMaintenanceMode() {
+ return false;
+ }
+
+ @Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
return 0;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 7f0f6db..e03a0d5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@@ -78,7 +77,6 @@ import java.util.LinkedList;
import java.util.List;
import java.util.HashMap;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
[3/3] hbase git commit: HBASE-16008 A robust way to deal with early
termination of HBCK (Stephen Yuan Jiang)
Posted by sy...@apache.org.
HBASE-16008 A robust way to deal with early termination of HBCK (Stephen Yuan Jiang)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bdd7782f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bdd7782f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bdd7782f
Branch: refs/heads/master
Commit: bdd7782f054a4740bb63f4d9781ffe083d51e4bf
Parents: cd0b85e
Author: Stephen Yuan Jiang <sy...@gmail.com>
Authored: Sat Jul 23 14:47:11 2016 -0700
Committer: Stephen Yuan Jiang <sy...@gmail.com>
Committed: Sat Jul 23 14:47:11 2016 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/client/Admin.java | 7 +
.../hbase/client/ConnectionImplementation.java | 7 +
.../apache/hadoop/hbase/client/HBaseAdmin.java | 15 +
.../hbase/zookeeper/ZooKeeperWatcher.java | 5 +
.../hbase/protobuf/generated/MasterProtos.java | 2574 ++++++++++++------
hbase-protocol/src/main/protobuf/Master.proto | 13 +
.../hadoop/hbase/master/CatalogJanitor.java | 11 +
.../org/apache/hadoop/hbase/master/HMaster.java | 44 +-
.../hadoop/hbase/master/MasterRpcServices.java | 9 +
.../hadoop/hbase/master/MasterServices.java | 5 +
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 156 +-
.../zookeeper/MasterMaintenanceModeTracker.java | 81 +
.../hbase/zookeeper/SplitOrMergeTracker.java | 2 -
.../hbase/master/MockNoopMasterServices.java | 5 +
.../hadoop/hbase/util/TestHBaseFsckOneRS.java | 2 -
15 files changed, 2011 insertions(+), 925 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index df79dcf..0610517 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -959,6 +959,13 @@ public interface Admin extends Abortable, Closeable {
void stopMaster() throws IOException;
/**
+ * Check whether Master is in maintenance mode
+ *
+ * @throws IOException if a remote or network exception occurs
+ */
+ boolean isMasterInMaintenanceMode() throws IOException;
+
+ /**
* Stop the designated regionserver
*
* @param hostnamePort Hostname and port delimited by a <code>:</code> as in
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 9b913c8..bb5c996 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1416,6 +1416,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
}
@Override
+ public MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+ final RpcController controller,
+ final MasterProtos.IsInMaintenanceModeRequest request) throws ServiceException {
+ return stub.isMasterInMaintenanceMode(controller, request);
+ }
+
+ @Override
public MasterProtos.BalanceResponse balance(RpcController controller,
MasterProtos.BalanceRequest request) throws ServiceException {
return stub.balance(controller, request);
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 074fe7f..29650ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -130,6 +130,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterSta
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
@@ -1933,6 +1935,19 @@ public class HBaseAdmin implements Admin {
}
@Override
+ public boolean isMasterInMaintenanceMode() throws IOException {
+ return executeCallable(new MasterCallable<IsInMaintenanceModeResponse>(getConnection()) {
+ @Override
+ public IsInMaintenanceModeResponse call(int callTimeout) throws ServiceException {
+ PayloadCarryingRpcController controller = rpcControllerFactory.newController();
+ controller.setCallTimeout(callTimeout);
+ return master.isMasterInMaintenanceMode(
+ controller, IsInMaintenanceModeRequest.newBuilder().build());
+ }
+ }).getInMaintenanceMode();
+ }
+
+ @Override
public ClusterStatus getClusterStatus() throws IOException {
return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 7cbfc98..5ef7171 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -123,6 +123,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
public String recoveringRegionsZNode;
// znode containing namespace descriptors
public static String namespaceZNode = "namespace";
+ // znode indicating master maintenance mode
+ public static String masterMaintZNode = "masterMaintenance";
public final static String META_ZNODE_PREFIX = "meta-region-server";
@@ -194,6 +196,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode);
ZKUtil.createAndFailSilent(this, tableLockZNode);
ZKUtil.createAndFailSilent(this, recoveringRegionsZNode);
+ ZKUtil.createAndFailSilent(this, masterMaintZNode);
} catch (KeeperException e) {
throw new ZooKeeperConnectionException(
prefix("Unexpected KeeperException creating base node"), e);
@@ -442,6 +445,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
namespaceZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.namespace", "namespace"));
+ masterMaintZNode = ZKUtil.joinZNode(baseZNode,
+ conf.get("zookeeper.znode.masterMaintenance", "master-maintenance"));
}
/**
[2/3] hbase git commit: HBASE-16008 A robust way to deal with early
termination of HBCK (Stephen Yuan Jiang)
Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/bdd7782f/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 6daf889..c6477fa 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -26500,38 +26500,28 @@ public final class MasterProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.StopMasterResponse)
}
- public interface BalanceRequestOrBuilder
+ public interface IsInMaintenanceModeRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
- // optional bool force = 1;
- /**
- * <code>optional bool force = 1;</code>
- */
- boolean hasForce();
- /**
- * <code>optional bool force = 1;</code>
- */
- boolean getForce();
}
/**
- * Protobuf type {@code hbase.pb.BalanceRequest}
+ * Protobuf type {@code hbase.pb.IsInMaintenanceModeRequest}
*/
- public static final class BalanceRequest extends
+ public static final class IsInMaintenanceModeRequest extends
com.google.protobuf.GeneratedMessage
- implements BalanceRequestOrBuilder {
- // Use BalanceRequest.newBuilder() to construct.
- private BalanceRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ implements IsInMaintenanceModeRequestOrBuilder {
+ // Use IsInMaintenanceModeRequest.newBuilder() to construct.
+ private IsInMaintenanceModeRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
- private BalanceRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+ private IsInMaintenanceModeRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- private static final BalanceRequest defaultInstance;
- public static BalanceRequest getDefaultInstance() {
+ private static final IsInMaintenanceModeRequest defaultInstance;
+ public static IsInMaintenanceModeRequest getDefaultInstance() {
return defaultInstance;
}
- public BalanceRequest getDefaultInstanceForType() {
+ public IsInMaintenanceModeRequest getDefaultInstanceForType() {
return defaultInstance;
}
@@ -26541,12 +26531,11 @@ public final class MasterProtos {
getUnknownFields() {
return this.unknownFields;
}
- private BalanceRequest(
+ private IsInMaintenanceModeRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
- int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
@@ -26564,11 +26553,6 @@ public final class MasterProtos {
}
break;
}
- case 8: {
- bitField0_ |= 0x00000001;
- force_ = input.readBool();
- break;
- }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -26583,50 +26567,32 @@ public final class MasterProtos {
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.Builder.class);
}
- public static com.google.protobuf.Parser<BalanceRequest> PARSER =
- new com.google.protobuf.AbstractParser<BalanceRequest>() {
- public BalanceRequest parsePartialFrom(
+ public static com.google.protobuf.Parser<IsInMaintenanceModeRequest> PARSER =
+ new com.google.protobuf.AbstractParser<IsInMaintenanceModeRequest>() {
+ public IsInMaintenanceModeRequest parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return new BalanceRequest(input, extensionRegistry);
+ return new IsInMaintenanceModeRequest(input, extensionRegistry);
}
};
@java.lang.Override
- public com.google.protobuf.Parser<BalanceRequest> getParserForType() {
+ public com.google.protobuf.Parser<IsInMaintenanceModeRequest> getParserForType() {
return PARSER;
}
- private int bitField0_;
- // optional bool force = 1;
- public static final int FORCE_FIELD_NUMBER = 1;
- private boolean force_;
- /**
- * <code>optional bool force = 1;</code>
- */
- public boolean hasForce() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional bool force = 1;</code>
- */
- public boolean getForce() {
- return force_;
- }
-
private void initFields() {
- force_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -26640,9 +26606,6 @@ public final class MasterProtos {
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBool(1, force_);
- }
getUnknownFields().writeTo(output);
}
@@ -26652,10 +26615,6 @@ public final class MasterProtos {
if (size != -1) return size;
size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(1, force_);
- }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -26673,17 +26632,12 @@ public final class MasterProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)) {
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)) {
return super.equals(obj);
}
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) obj;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) obj;
boolean result = true;
- result = result && (hasForce() == other.hasForce());
- if (hasForce()) {
- result = result && (getForce()
- == other.getForce());
- }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -26697,62 +26651,58 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasForce()) {
- hash = (37 * hash) + FORCE_FIELD_NUMBER;
- hash = (53 * hash) + hashBoolean(getForce());
- }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(byte[] data)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -26761,7 +26711,7 @@ public final class MasterProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -26773,24 +26723,24 @@ public final class MasterProtos {
return builder;
}
/**
- * Protobuf type {@code hbase.pb.BalanceRequest}
+ * Protobuf type {@code hbase.pb.IsInMaintenanceModeRequest}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequestOrBuilder {
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequestOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.Builder.class);
}
- // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.newBuilder()
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -26810,8 +26760,6 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
- force_ = false;
- bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@@ -26821,48 +26769,38 @@ public final class MasterProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance();
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest build() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = buildPartial();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.force_ = force_;
- result.bitField0_ = to_bitField0_;
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)other);
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other) {
- if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance()) return this;
- if (other.hasForce()) {
- setForce(other.getForce());
- }
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -26875,11 +26813,11 @@ public final class MasterProtos {
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parsedMessage = null;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) e.getUnfinishedMessage();
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
@@ -26888,84 +26826,50 @@ public final class MasterProtos {
}
return this;
}
- private int bitField0_;
- // optional bool force = 1;
- private boolean force_ ;
- /**
- * <code>optional bool force = 1;</code>
- */
- public boolean hasForce() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional bool force = 1;</code>
- */
- public boolean getForce() {
- return force_;
- }
- /**
- * <code>optional bool force = 1;</code>
- */
- public Builder setForce(boolean value) {
- bitField0_ |= 0x00000001;
- force_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional bool force = 1;</code>
- */
- public Builder clearForce() {
- bitField0_ = (bitField0_ & ~0x00000001);
- force_ = false;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceRequest)
+ // @@protoc_insertion_point(builder_scope:hbase.pb.IsInMaintenanceModeRequest)
}
static {
- defaultInstance = new BalanceRequest(true);
+ defaultInstance = new IsInMaintenanceModeRequest(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:hbase.pb.BalanceRequest)
+ // @@protoc_insertion_point(class_scope:hbase.pb.IsInMaintenanceModeRequest)
}
- public interface BalanceResponseOrBuilder
+ public interface IsInMaintenanceModeResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required bool balancer_ran = 1;
+ // required bool inMaintenanceMode = 1;
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- boolean hasBalancerRan();
+ boolean hasInMaintenanceMode();
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- boolean getBalancerRan();
+ boolean getInMaintenanceMode();
}
/**
- * Protobuf type {@code hbase.pb.BalanceResponse}
+ * Protobuf type {@code hbase.pb.IsInMaintenanceModeResponse}
*/
- public static final class BalanceResponse extends
+ public static final class IsInMaintenanceModeResponse extends
com.google.protobuf.GeneratedMessage
- implements BalanceResponseOrBuilder {
- // Use BalanceResponse.newBuilder() to construct.
- private BalanceResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ implements IsInMaintenanceModeResponseOrBuilder {
+ // Use IsInMaintenanceModeResponse.newBuilder() to construct.
+ private IsInMaintenanceModeResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
- private BalanceResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+ private IsInMaintenanceModeResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- private static final BalanceResponse defaultInstance;
- public static BalanceResponse getDefaultInstance() {
+ private static final IsInMaintenanceModeResponse defaultInstance;
+ public static IsInMaintenanceModeResponse getDefaultInstance() {
return defaultInstance;
}
- public BalanceResponse getDefaultInstanceForType() {
+ public IsInMaintenanceModeResponse getDefaultInstanceForType() {
return defaultInstance;
}
@@ -26975,7 +26879,7 @@ public final class MasterProtos {
getUnknownFields() {
return this.unknownFields;
}
- private BalanceResponse(
+ private IsInMaintenanceModeResponse(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -27000,7 +26904,7 @@ public final class MasterProtos {
}
case 8: {
bitField0_ |= 0x00000001;
- balancerRan_ = input.readBool();
+ inMaintenanceMode_ = input.readBool();
break;
}
}
@@ -27017,57 +26921,57 @@ public final class MasterProtos {
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.Builder.class);
}
- public static com.google.protobuf.Parser<BalanceResponse> PARSER =
- new com.google.protobuf.AbstractParser<BalanceResponse>() {
- public BalanceResponse parsePartialFrom(
+ public static com.google.protobuf.Parser<IsInMaintenanceModeResponse> PARSER =
+ new com.google.protobuf.AbstractParser<IsInMaintenanceModeResponse>() {
+ public IsInMaintenanceModeResponse parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return new BalanceResponse(input, extensionRegistry);
+ return new IsInMaintenanceModeResponse(input, extensionRegistry);
}
};
@java.lang.Override
- public com.google.protobuf.Parser<BalanceResponse> getParserForType() {
+ public com.google.protobuf.Parser<IsInMaintenanceModeResponse> getParserForType() {
return PARSER;
}
private int bitField0_;
- // required bool balancer_ran = 1;
- public static final int BALANCER_RAN_FIELD_NUMBER = 1;
- private boolean balancerRan_;
+ // required bool inMaintenanceMode = 1;
+ public static final int INMAINTENANCEMODE_FIELD_NUMBER = 1;
+ private boolean inMaintenanceMode_;
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- public boolean hasBalancerRan() {
+ public boolean hasInMaintenanceMode() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- public boolean getBalancerRan() {
- return balancerRan_;
+ public boolean getInMaintenanceMode() {
+ return inMaintenanceMode_;
}
private void initFields() {
- balancerRan_ = false;
+ inMaintenanceMode_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (!hasBalancerRan()) {
+ if (!hasInMaintenanceMode()) {
memoizedIsInitialized = 0;
return false;
}
@@ -27079,7 +26983,7 @@ public final class MasterProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBool(1, balancerRan_);
+ output.writeBool(1, inMaintenanceMode_);
}
getUnknownFields().writeTo(output);
}
@@ -27092,7 +26996,7 @@ public final class MasterProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(1, balancerRan_);
+ .computeBoolSize(1, inMaintenanceMode_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -27111,16 +27015,16 @@ public final class MasterProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)) {
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse)) {
return super.equals(obj);
}
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) obj;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) obj;
boolean result = true;
- result = result && (hasBalancerRan() == other.hasBalancerRan());
- if (hasBalancerRan()) {
- result = result && (getBalancerRan()
- == other.getBalancerRan());
+ result = result && (hasInMaintenanceMode() == other.hasInMaintenanceMode());
+ if (hasInMaintenanceMode()) {
+ result = result && (getInMaintenanceMode()
+ == other.getInMaintenanceMode());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
@@ -27135,62 +27039,62 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasBalancerRan()) {
- hash = (37 * hash) + BALANCER_RAN_FIELD_NUMBER;
- hash = (53 * hash) + hashBoolean(getBalancerRan());
+ if (hasInMaintenanceMode()) {
+ hash = (37 * hash) + INMAINTENANCEMODE_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getInMaintenanceMode());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(byte[] data)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -27199,7 +27103,7 @@ public final class MasterProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -27211,24 +27115,24 @@ public final class MasterProtos {
return builder;
}
/**
- * Protobuf type {@code hbase.pb.BalanceResponse}
+ * Protobuf type {@code hbase.pb.IsInMaintenanceModeResponse}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponseOrBuilder {
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponseOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.Builder.class);
}
- // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.newBuilder()
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -27248,7 +27152,7 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
- balancerRan_ = false;
+ inMaintenanceMode_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@@ -27259,54 +27163,54 @@ public final class MasterProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance();
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse build() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = buildPartial();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse(this);
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- result.balancerRan_ = balancerRan_;
+ result.inMaintenanceMode_ = inMaintenanceMode_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)other);
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other) {
- if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()) return this;
- if (other.hasBalancerRan()) {
- setBalancerRan(other.getBalancerRan());
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()) return this;
+ if (other.hasInMaintenanceMode()) {
+ setInMaintenanceMode(other.getInMaintenanceMode());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
- if (!hasBalancerRan()) {
+ if (!hasInMaintenanceMode()) {
return false;
}
@@ -27317,11 +27221,11 @@ public final class MasterProtos {
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parsedMessage = null;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) e.getUnfinishedMessage();
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
@@ -27332,92 +27236,82 @@ public final class MasterProtos {
}
private int bitField0_;
- // required bool balancer_ran = 1;
- private boolean balancerRan_ ;
+ // required bool inMaintenanceMode = 1;
+ private boolean inMaintenanceMode_ ;
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- public boolean hasBalancerRan() {
+ public boolean hasInMaintenanceMode() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- public boolean getBalancerRan() {
- return balancerRan_;
+ public boolean getInMaintenanceMode() {
+ return inMaintenanceMode_;
}
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- public Builder setBalancerRan(boolean value) {
+ public Builder setInMaintenanceMode(boolean value) {
bitField0_ |= 0x00000001;
- balancerRan_ = value;
+ inMaintenanceMode_ = value;
onChanged();
return this;
}
/**
- * <code>required bool balancer_ran = 1;</code>
+ * <code>required bool inMaintenanceMode = 1;</code>
*/
- public Builder clearBalancerRan() {
+ public Builder clearInMaintenanceMode() {
bitField0_ = (bitField0_ & ~0x00000001);
- balancerRan_ = false;
+ inMaintenanceMode_ = false;
onChanged();
return this;
}
- // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceResponse)
+ // @@protoc_insertion_point(builder_scope:hbase.pb.IsInMaintenanceModeResponse)
}
static {
- defaultInstance = new BalanceResponse(true);
+ defaultInstance = new IsInMaintenanceModeResponse(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:hbase.pb.BalanceResponse)
+ // @@protoc_insertion_point(class_scope:hbase.pb.IsInMaintenanceModeResponse)
}
- public interface SetBalancerRunningRequestOrBuilder
+ public interface BalanceRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required bool on = 1;
- /**
- * <code>required bool on = 1;</code>
- */
- boolean hasOn();
- /**
- * <code>required bool on = 1;</code>
- */
- boolean getOn();
-
- // optional bool synchronous = 2;
+ // optional bool force = 1;
/**
- * <code>optional bool synchronous = 2;</code>
+ * <code>optional bool force = 1;</code>
*/
- boolean hasSynchronous();
+ boolean hasForce();
/**
- * <code>optional bool synchronous = 2;</code>
+ * <code>optional bool force = 1;</code>
*/
- boolean getSynchronous();
+ boolean getForce();
}
/**
- * Protobuf type {@code hbase.pb.SetBalancerRunningRequest}
+ * Protobuf type {@code hbase.pb.BalanceRequest}
*/
- public static final class SetBalancerRunningRequest extends
+ public static final class BalanceRequest extends
com.google.protobuf.GeneratedMessage
- implements SetBalancerRunningRequestOrBuilder {
- // Use SetBalancerRunningRequest.newBuilder() to construct.
- private SetBalancerRunningRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ implements BalanceRequestOrBuilder {
+ // Use BalanceRequest.newBuilder() to construct.
+ private BalanceRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
- private SetBalancerRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+ private BalanceRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- private static final SetBalancerRunningRequest defaultInstance;
- public static SetBalancerRunningRequest getDefaultInstance() {
+ private static final BalanceRequest defaultInstance;
+ public static BalanceRequest getDefaultInstance() {
return defaultInstance;
}
- public SetBalancerRunningRequest getDefaultInstanceForType() {
+ public BalanceRequest getDefaultInstanceForType() {
return defaultInstance;
}
@@ -27427,7 +27321,893 @@ public final class MasterProtos {
getUnknownFields() {
return this.unknownFields;
}
- private SetBalancerRunningRequest(
+ private BalanceRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ force_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<BalanceRequest> PARSER =
+ new com.google.protobuf.AbstractParser<BalanceRequest>() {
+ public BalanceRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BalanceRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<BalanceRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional bool force = 1;
+ public static final int FORCE_FIELD_NUMBER = 1;
+ private boolean force_;
+ /**
+ * <code>optional bool force = 1;</code>
+ */
+ public boolean hasForce() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bool force = 1;</code>
+ */
+ public boolean getForce() {
+ return force_;
+ }
+
+ private void initFields() {
+ force_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, force_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, force_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) obj;
+
+ boolean result = true;
+ result = result && (hasForce() == other.hasForce());
+ if (hasForce()) {
+ result = result && (getForce()
+ == other.getForce());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasForce()) {
+ hash = (37 * hash) + FORCE_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getForce());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BalanceRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ force_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.force_ = force_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance()) return this;
+ if (other.hasForce()) {
+ setForce(other.getForce());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional bool force = 1;
+ private boolean force_ ;
+ /**
+ * <code>optional bool force = 1;</code>
+ */
+ public boolean hasForce() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bool force = 1;</code>
+ */
+ public boolean getForce() {
+ return force_;
+ }
+ /**
+ * <code>optional bool force = 1;</code>
+ */
+ public Builder setForce(boolean value) {
+ bitField0_ |= 0x00000001;
+ force_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool force = 1;</code>
+ */
+ public Builder clearForce() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ force_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceRequest)
+ }
+
+ static {
+ defaultInstance = new BalanceRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BalanceRequest)
+ }
+
+ public interface BalanceResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bool balancer_ran = 1;
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ boolean hasBalancerRan();
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ boolean getBalancerRan();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BalanceResponse}
+ */
+ public static final class BalanceResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements BalanceResponseOrBuilder {
+ // Use BalanceResponse.newBuilder() to construct.
+ private BalanceResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BalanceResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BalanceResponse defaultInstance;
+ public static BalanceResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BalanceResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private BalanceResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ balancerRan_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<BalanceResponse> PARSER =
+ new com.google.protobuf.AbstractParser<BalanceResponse>() {
+ public BalanceResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BalanceResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<BalanceResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bool balancer_ran = 1;
+ public static final int BALANCER_RAN_FIELD_NUMBER = 1;
+ private boolean balancerRan_;
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ public boolean hasBalancerRan() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ public boolean getBalancerRan() {
+ return balancerRan_;
+ }
+
+ private void initFields() {
+ balancerRan_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBalancerRan()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, balancerRan_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, balancerRan_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) obj;
+
+ boolean result = true;
+ result = result && (hasBalancerRan() == other.hasBalancerRan());
+ if (hasBalancerRan()) {
+ result = result && (getBalancerRan()
+ == other.getBalancerRan());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasBalancerRan()) {
+ hash = (37 * hash) + BALANCER_RAN_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getBalancerRan());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BalanceResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ balancerRan_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BalanceResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.balancerRan_ = balancerRan_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()) return this;
+ if (other.hasBalancerRan()) {
+ setBalancerRan(other.getBalancerRan());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBalancerRan()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bool balancer_ran = 1;
+ private boolean balancerRan_ ;
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ public boolean hasBalancerRan() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ public boolean getBalancerRan() {
+ return balancerRan_;
+ }
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ public Builder setBalancerRan(boolean value) {
+ bitField0_ |= 0x00000001;
+ balancerRan_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool balancer_ran = 1;</code>
+ */
+ public Builder clearBalancerRan() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ balancerRan_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceResponse)
+ }
+
+ static {
+ defaultInstance = new BalanceResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BalanceResponse)
+ }
+
+ public interface SetBalancerRunningRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bool on = 1;
+ /**
+ * <code>required bool on = 1;</code>
+ */
+ boolean hasOn();
+ /**
+ * <code>required bool on = 1;</code>
+ */
+ boolean getOn();
+
+ // optional bool synchronous = 2;
+ /**
+ * <code>optional bool synchronous = 2;</code>
+ */
+ boolean hasSynchronous();
+ /**
+ * <code>optional bool synchronous = 2;</code>
+ */
+ boolean getSynchronous();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.SetBalancerRunningRequest}
+ */
+ public static final class SetBalancerRunningRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements SetBalancerRunningRequestOrBuilder {
+ // Use SetBalancerRunningRequest.newBuilder() to construct.
+ private SetBalancerRunningRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SetBalancerRunningRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SetBalancerRunningRequest defaultInstance;
+ public static SetBalancerRunningRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SetBalancerRunningRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SetBalancerRunningRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -60044,6 +60824,19 @@ public final class MasterProtos {
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse> done);
/**
+ * <code>rpc IsMasterInMaintenanceMode(.hbase.pb.IsInMaintenanceModeRequest) returns (.hbase.pb.IsInMaintenanceModeResponse);</code>
+ *
+ * <pre>
+ **
+ * Query whether the Master is in maintenance mode.
+ * </pre>
+ */
+ public abstract void isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse> done);
+
+ /**
* <code>rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse);</code>
*
* <pre>
@@ -60657,6 +61450,14 @@ public final class MasterProtos {
}
@java.lang.Override
+ public void isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse> done) {
+ impl.isMasterInMaintenanceMode(controller, request, done);
+ }
+
+ @java.lang.Override
public void balance(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request,
@@ -61001,74 +61802,76 @@ public final class MasterProtos {
case 20:
return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)request);
case 21:
- return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request);
+ return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request);
case 22:
- return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request);
+ return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request);
case 23:
- return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request);
+ return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request);
case 24:
- return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request);
+ return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request);
case 25:
- return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
+ return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request);
case 26:
- return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
+ return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
case 27:
- return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
+ return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
case 28:
- return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
+ return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
case 29:
- return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
+ return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
case 30:
- return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
+ return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
case 31:
- return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
+ return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
case 32:
- return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
+ return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
case 33:
- return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
+ return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
case 34:
- return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
+ return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
case 35:
- return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
+ return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
case 36:
- return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
+ return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
case 37:
- return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
+ return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
case 38:
- return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
+ return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
case 39:
- return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
+ return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
case 40:
- return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
+ return
<TRUNCATED>