Posted to commits@hbase.apache.org by jm...@apache.org on 2015/05/01 17:27:47 UTC
[01/50] [abbrv] hbase git commit: HBASE-13381 Expand TestSizeFailures to include small scans (Josh Elser)
Repository: hbase
Updated Branches:
refs/heads/hbase-11339 eba8a708a -> 0e20bbf6a
HBASE-13381 Expand TestSizeFailures to include small scans (Josh Elser)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3cd929ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3cd929ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3cd929ee
Branch: refs/heads/hbase-11339
Commit: 3cd929eea253afd632b6def7c24df434872a2d7d
Parents: 66f7bf4
Author: tedyu <yu...@gmail.com>
Authored: Thu Apr 9 17:57:57 2015 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Thu Apr 9 17:57:57 2015 -0700
----------------------------------------------------------------------
.../hadoop/hbase/client/TestSizeFailures.java | 204 ++++++++++---------
1 file changed, 109 insertions(+), 95 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/3cd929ee/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
index a77c50c..a0b0f70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java
@@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
-import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
-import java.util.TreeSet;
+import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -41,16 +39,17 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import com.google.common.collect.Maps;
+
@Category(LargeTests.class)
public class TestSizeFailures {
- final Log LOG = LogFactory.getLog(getClass());
+ static final Log LOG = LogFactory.getLog(TestSizeFailures.class);
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static byte [] FAMILY = Bytes.toBytes("testFamily");
protected static int SLAVES = 1;
+ private static TableName TABLENAME;
+ private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 10;
- /**
- * @throws java.lang.Exception
- */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Uncomment the following lines if more verbosity is needed for
@@ -61,11 +60,49 @@ public class TestSizeFailures {
Configuration conf = TEST_UTIL.getConfiguration();
conf.setBoolean("hbase.table.sanity.checks", true); // enable sanity checks in the server
TEST_UTIL.startMiniCluster(SLAVES);
+
+ // Write a bunch of data
+ TABLENAME = TableName.valueOf("testSizeFailures");
+ List<byte[]> qualifiers = new ArrayList<>();
+ for (int i = 1; i <= 10; i++) {
+ qualifiers.add(Bytes.toBytes(Integer.toString(i)));
+ }
+
+ HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+ HTableDescriptor desc = new HTableDescriptor(TABLENAME);
+ desc.addFamily(hcd);
+ byte[][] splits = new byte[9][2];
+ for (int i = 1; i < 10; i++) {
+ // split keys '1'..'9' (ASCII 49..57); the second key byte stays 0x00
+ splits[i - 1][0] = (byte) (48 + i);
+ }
+ TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
+ Connection conn = TEST_UTIL.getConnection();
+
+ try (Table table = conn.getTable(TABLENAME)) {
+ List<Put> puts = new LinkedList<>();
+ for (int i = 0; i < NUM_ROWS; i++) {
+ Put p = new Put(Bytes.toBytes(Integer.toString(i)));
+ for (int j = 0; j < NUM_COLS; j++) {
+ byte[] value = new byte[50];
+ Bytes.random(value);
+ p.addColumn(FAMILY, Bytes.toBytes(Integer.toString(j)), value);
+ }
+ puts.add(p);
+
+ if (puts.size() == 1000) {
+ table.batch(puts, new Object[1000]);
+ puts.clear();
+ }
+ }
+
+ if (puts.size() > 0) {
+ table.batch(puts, new Object[puts.size()]);
+ }
+ }
}
- /**
- * @throws java.lang.Exception
- */
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
@@ -74,89 +111,66 @@ public class TestSizeFailures {
/**
* Basic client side validation of HBASE-13262
*/
- @Test
- public void testScannerSeesAllRecords() throws Exception {
- final int NUM_ROWS = 1000 * 1000, NUM_COLS = 10;
- final TableName TABLENAME = TableName.valueOf("testScannerSeesAllRecords");
- List<byte[]> qualifiers = new ArrayList<>();
- for (int i = 1; i <= 10; i++) {
- qualifiers.add(Bytes.toBytes(Integer.toString(i)));
- }
-
- HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
- HTableDescriptor desc = new HTableDescriptor(TABLENAME);
- desc.addFamily(hcd);
- byte[][] splits = new byte[9][2];
- for (int i = 1; i < 10; i++) {
- int split = 48 + i;
- splits[i - 1][0] = (byte) (split >>> 8);
- splits[i - 1][0] = (byte) (split);
- }
- TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
- Connection conn = TEST_UTIL.getConnection();
-
- try (Table table = conn.getTable(TABLENAME)) {
- List<Put> puts = new LinkedList<>();
- for (int i = 0; i < NUM_ROWS; i++) {
- Put p = new Put(Bytes.toBytes(Integer.toString(i)));
- for (int j = 0; j < NUM_COLS; j++) {
- byte[] value = new byte[50];
- Bytes.random(value);
- p.addColumn(FAMILY, Bytes.toBytes(Integer.toString(j)), value);
- }
- puts.add(p);
-
- if (puts.size() == 1000) {
- Object[] results = new Object[1000];
- try {
- table.batch(puts, results);
- } catch (IOException e) {
- LOG.error("Failed to write data", e);
- LOG.debug("Errors: " + Arrays.toString(results));
- }
-
- puts.clear();
- }
- }
-
- if (puts.size() > 0) {
- Object[] results = new Object[puts.size()];
- try {
- table.batch(puts, results);
- } catch (IOException e) {
- LOG.error("Failed to write data", e);
- LOG.debug("Errors: " + Arrays.toString(results));
- }
- }
-
- // Flush the memstore to disk
- TEST_UTIL.getHBaseAdmin().flush(TABLENAME);
-
- TreeSet<Integer> rows = new TreeSet<>();
- long rowsObserved = 0l;
- long entriesObserved = 0l;
- Scan s = new Scan();
- s.addFamily(FAMILY);
- s.setMaxResultSize(-1);
- s.setBatch(-1);
- s.setCaching(500);
- ResultScanner scanner = table.getScanner(s);
- // Read all the records in the table
- for (Result result : scanner) {
- rowsObserved++;
- String row = new String(result.getRow());
- rows.add(Integer.parseInt(row));
- while (result.advance()) {
- entriesObserved++;
- // result.current();
- }
- }
-
- // Verify that we see 1M rows and 10M cells
- assertEquals(NUM_ROWS, rowsObserved);
- assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
- }
-
- conn.close();
- }
+ @Test
+ public void testScannerSeesAllRecords() throws Exception {
+ Connection conn = TEST_UTIL.getConnection();
+ try (Table table = conn.getTable(TABLENAME)) {
+ Scan s = new Scan();
+ s.addFamily(FAMILY);
+ s.setMaxResultSize(-1);
+ s.setBatch(-1);
+ s.setCaching(500);
+ Entry<Long,Long> entry = sumTable(table.getScanner(s));
+ long rowsObserved = entry.getKey();
+ long entriesObserved = entry.getValue();
+
+ // Verify that we see 1M rows and 10M cells
+ assertEquals(NUM_ROWS, rowsObserved);
+ assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
+ }
+ }
+
+ /**
+ * Basic client side validation of HBASE-13262
+ */
+ @Test
+ public void testSmallScannerSeesAllRecords() throws Exception {
+ Connection conn = TEST_UTIL.getConnection();
+ try (Table table = conn.getTable(TABLENAME)) {
+ Scan s = new Scan();
+ s.setSmall(true);
+ s.addFamily(FAMILY);
+ s.setMaxResultSize(-1);
+ s.setBatch(-1);
+ s.setCaching(500);
+ Entry<Long,Long> entry = sumTable(table.getScanner(s));
+ long rowsObserved = entry.getKey();
+ long entriesObserved = entry.getValue();
+
+ // Verify that we see 1M rows and 10M cells
+ assertEquals(NUM_ROWS, rowsObserved);
+ assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
+ }
+ }
+
+ /**
+ * Count the number of rows and the number of entries (cells) seen by a scanner.
+ *
+ * @param scanner the scanner to drain
+ * @return an entry whose key is the rows observed and whose value is the entries observed
+ */
+ private Entry<Long,Long> sumTable(ResultScanner scanner) {
+ long rowsObserved = 0L;
+ long entriesObserved = 0L;
+
+ // Read all the records in the table
+ for (Result result : scanner) {
+ rowsObserved++;
+ while (result.advance()) {
+ entriesObserved++;
+ }
+ }
+ return Maps.immutableEntry(rowsObserved, entriesObserved);
+ }
}
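For orientation, a minimal sketch of the scan-and-count pattern this expanded test exercises, assuming a Connection named conn plus the FAMILY constant from the test above; the table name matches the one created in setUpBeforeClass(). The setSmall(true) flag is the only difference between the two new test cases:

    Scan s = new Scan();
    s.setSmall(true);          // exercise the small-scan path (HBASE-13381)
    s.addFamily(FAMILY);
    s.setMaxResultSize(-1);    // no client-imposed size limit
    s.setCaching(500);
    long rows = 0, cells = 0;
    try (Table table = conn.getTable(TableName.valueOf("testSizeFailures"));
        ResultScanner scanner = table.getScanner(s)) {
      for (Result result : scanner) {
        rows++;                // one Result per row
        while (result.advance()) {
          cells++;             // one step per Cell within the row
        }
      }
    }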
[09/50] [abbrv] hbase git commit: HBASE-13203 Procedure v2 - master create/delete table
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
new file mode 100644
index 0000000..903dbd3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+
+/**
+ * Helper to synchronously wait on conditions.
+ * This will be removed in the future (mainly once the AssignmentManager is
+ * replaced with a Procedure version): ProcedureYieldException will be used
+ * instead, and the queue will handle waiting and scheduling based on events.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class ProcedureSyncWait {
+ private static final Log LOG = LogFactory.getLog(ProcedureSyncWait.class);
+
+ private ProcedureSyncWait() {}
+
+ @InterfaceAudience.Private
+ public interface Predicate<T> {
+ T evaluate() throws IOException;
+ }
+
+ public static byte[] submitAndWaitProcedure(ProcedureExecutor<MasterProcedureEnv> procExec,
+ final Procedure proc) throws IOException {
+ long procId = procExec.submitProcedure(proc);
+ return waitForProcedureToComplete(procExec, procId);
+ }
+
+ public static byte[] waitForProcedureToComplete(ProcedureExecutor<MasterProcedureEnv> procExec,
+ final long procId) throws IOException {
+ while (!procExec.isFinished(procId) && procExec.isRunning()) {
+ // TODO: add a config to make it tunable
+ // Dev consideration: are we waiting forever, or should we set a timeout value?
+ Threads.sleepWithoutInterrupt(250);
+ }
+ ProcedureResult result = procExec.getResult(procId);
+ if (result != null) {
+ if (result.isFailed()) {
+ // If the procedure fails, we should always have an exception captured. Throw it.
+ throw result.getException().unwrapRemoteException();
+ }
+ return result.getResult();
+ } else {
+ if (procExec.isRunning()) {
+ throw new IOException("Procedure " + procId + "not found");
+ } else {
+ throw new IOException("The Master is Aborting");
+ }
+ }
+ }
+
+ public static <T> T waitFor(MasterProcedureEnv env, String purpose, Predicate<T> predicate)
+ throws IOException {
+ final Configuration conf = env.getMasterConfiguration();
+ final long waitTime = conf.getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
+ final long waitingTimeForEvents = conf.getInt("hbase.master.event.waiting.time", 1000);
+ return waitFor(env, waitTime, waitingTimeForEvents, purpose, predicate);
+ }
+
+ public static <T> T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents,
+ String purpose, Predicate<T> predicate) throws IOException {
+ final long done = EnvironmentEdgeManager.currentTime() + waitTime;
+ do {
+ T result = predicate.evaluate();
+ if (result != null && !result.equals(Boolean.FALSE)) {
+ return result;
+ }
+ try {
+ Thread.sleep(waitingTimeForEvents);
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while sleeping, waiting on " + purpose);
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ LOG.debug("Waiting on " + purpose);
+ } while (EnvironmentEdgeManager.currentTime() < done && env.isRunning());
+
+ throw new TimeoutIOException("Timed out while waiting on " + purpose);
+ }
+
+ protected static void waitMetaRegions(final MasterProcedureEnv env) throws IOException {
+ int timeout = env.getMasterConfiguration().getInt("hbase.client.catalog.timeout", 10000);
+ try {
+ if (env.getMasterServices().getMetaTableLocator().waitMetaRegionLocation(
+ env.getMasterServices().getZooKeeper(), timeout) == null) {
+ throw new NotAllMetaRegionsOnlineException();
+ }
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ }
+
+ protected static void waitRegionServers(final MasterProcedureEnv env) throws IOException {
+ final ServerManager sm = env.getMasterServices().getServerManager();
+ ProcedureSyncWait.waitFor(env, "server to assign region(s)",
+ new ProcedureSyncWait.Predicate<Boolean>() {
+ @Override
+ public Boolean evaluate() throws IOException {
+ List<ServerName> servers = sm.createDestinationServersList();
+ return servers != null && !servers.isEmpty();
+ }
+ });
+ }
+
+ protected static List<HRegionInfo> getRegionsFromMeta(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ return ProcedureSyncWait.waitFor(env, "regions of table=" + tableName + " from meta",
+ new ProcedureSyncWait.Predicate<List<HRegionInfo>>() {
+ @Override
+ public List<HRegionInfo> evaluate() throws IOException {
+ if (TableName.META_TABLE_NAME.equals(tableName)) {
+ return new MetaTableLocator().getMetaRegions(env.getMasterServices().getZooKeeper());
+ }
+ return MetaTableAccessor.getTableRegions(env.getMasterServices().getConnection(), tableName);
+ }
+ });
+ }
+
+ protected static void waitRegionInTransition(final MasterProcedureEnv env,
+ final List<HRegionInfo> regions) throws IOException, CoordinatedStateException {
+ final AssignmentManager am = env.getMasterServices().getAssignmentManager();
+ final RegionStates states = am.getRegionStates();
+ for (final HRegionInfo region : regions) {
+ ProcedureSyncWait.waitFor(env, "regions " + region.getRegionNameAsString() + " in transition",
+ new ProcedureSyncWait.Predicate<Boolean>() {
+ @Override
+ public Boolean evaluate() throws IOException {
+ if (states.isRegionInState(region, State.FAILED_OPEN)) {
+ am.regionOffline(region);
+ }
+ return !states.isRegionInTransition(region);
+ }
+ });
+ }
+ }
+}
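A hedged usage sketch of the waitFor() helper above: polling until a table's regions show up in hbase:meta. Here env is assumed to be a final MasterProcedureEnv already in scope, and the table name is hypothetical; returning null or Boolean.FALSE from evaluate() keeps the loop waiting:

    List<HRegionInfo> regions = ProcedureSyncWait.waitFor(env,
        "regions of table=example in meta",
        new ProcedureSyncWait.Predicate<List<HRegionInfo>>() {
          @Override
          public List<HRegionInfo> evaluate() throws IOException {
            List<HRegionInfo> r = MetaTableAccessor.getTableRegions(
                env.getMasterServices().getConnection(), TableName.valueOf("example"));
            return (r == null || r.isEmpty()) ? null : r;  // null => keep waiting
          }
        });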
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
new file mode 100644
index 0000000..76ca094
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.TableName;
+
+/**
+ * Procedures that operate on a specific Table (e.g. create, delete, snapshot, ...)
+ * must implement this interface to let the system handle locking and concurrency.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface TableProcedureInterface {
+ public enum TableOperationType { CREATE, DELETE, EDIT, READ };
+
+ /**
+ * @return the name of the table the procedure is operating on
+ */
+ TableName getTableName();
+
+ /**
+ * Given an operation type we can make decisions about what to do with pending operations.
+ * e.g. if we get a delete and we have some table operation pending (e.g. add column)
+ * we can abort those operations.
+ * @return the operation type that the procedure is executing.
+ */
+ TableOperationType getTableOperationType();
+}
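For illustration, a minimal implementor of the interface above. The class name, field, and constructor are hypothetical; only the two overridden methods are prescribed by the interface:

    public class ExampleEditTableProcedure implements TableProcedureInterface {
      private final TableName tableName;

      public ExampleEditTableProcedure(final TableName tableName) {
        this.tableName = tableName;
      }

      @Override
      public TableName getTableName() {
        return tableName;
      }

      @Override
      public TableOperationType getTableOperationType() {
        // Lets the framework order this procedure against other pending
        // operations on the same table (e.g. a DELETE may abort EDITs).
        return TableOperationType.EDIT;
      }
    }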
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 9893fc8..5fe5f8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.namespace.NamespaceAuditor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
@@ -444,14 +444,11 @@ public class MasterQuotaManager implements RegionStateListener {
new HRegionInfo(QuotaUtil.QUOTA_TABLE_NAME)
};
- masterServices.getExecutorService()
- .submit(new CreateTableHandler(masterServices,
- masterServices.getMasterFileSystem(),
- QuotaUtil.QUOTA_TABLE_DESC,
- masterServices.getConfiguration(),
- newRegions,
- masterServices)
- .prepare());
+ masterServices.getMasterProcedureExecutor()
+ .submitProcedure(new CreateTableProcedure(
+ masterServices.getMasterProcedureExecutor().getEnvironment(),
+ QuotaUtil.QUOTA_TABLE_DESC,
+ newRegions));
}
private static class NamedLock<T> {
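The submission above is asynchronous. A caller that must block until the quota table exists could use the ProcedureSyncWait helper added earlier in this changeset; a sketch, assuming procExec is the master's ProcedureExecutor and the descriptor and regions are prepared as above:

    byte[] result = ProcedureSyncWait.submitAndWaitProcedure(procExec,
        new CreateTableProcedure(procExec.getEnvironment(),
            QuotaUtil.QUOTA_TABLE_DESC, newRegions));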
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index a515f8e..f15eb1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -981,13 +981,14 @@ public class HRegionServer extends HasThread implements
// Send interrupts to wake up threads if sleeping so they notice shutdown.
// TODO: Should we check they are alive? If OOME could have exited already
- if(this.hMemManager != null) this.hMemManager.stop();
+ if (this.hMemManager != null) this.hMemManager.stop();
if (this.cacheFlusher != null) this.cacheFlusher.interruptIfNecessary();
if (this.compactSplitThread != null) this.compactSplitThread.interruptIfNecessary();
if (this.compactionChecker != null) this.compactionChecker.cancel(true);
if (this.healthCheckChore != null) this.healthCheckChore.cancel(true);
if (this.nonceManagerChore != null) this.nonceManagerChore.cancel(true);
if (this.storefileRefresher != null) this.storefileRefresher.cancel(true);
+ sendShutdownInterrupt();
// Stop the quota manager
if (rsQuotaManager != null) {
@@ -2073,6 +2074,12 @@ public class HRegionServer extends HasThread implements
}
/**
+ * Called on stop/abort before closing the cluster connection and meta locator.
+ */
+ protected void sendShutdownInterrupt() {
+ }
+
+ /**
* Wait on all threads to finish. Presumption is that all closes and stops
* have already been called.
*/
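A sketch of the intended use of the new hook: a subclass overrides it to interrupt or stop long-running components while the cluster connection is still usable. The worker field here is hypothetical:

    @Override
    protected void sendShutdownInterrupt() {
      // Hypothetical worker thread that may be blocked on I/O or a queue.
      if (this.exampleWorker != null) {
        this.exampleWorker.interrupt();
      }
    }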
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 95d8a17..347cad5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -67,6 +67,30 @@ public abstract class ModifyRegionUtils {
void editRegion(final HRegionInfo region) throws IOException;
}
+ public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor,
+ byte[][] splitKeys) {
+ long regionId = System.currentTimeMillis();
+ HRegionInfo[] hRegionInfos = null;
+ if (splitKeys == null || splitKeys.length == 0) {
+ hRegionInfos = new HRegionInfo[]{
+ new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId)
+ };
+ } else {
+ int numRegions = splitKeys.length + 1;
+ hRegionInfos = new HRegionInfo[numRegions];
+ byte[] startKey = null;
+ byte[] endKey = null;
+ for (int i = 0; i < numRegions; i++) {
+ endKey = (i == splitKeys.length) ? null : splitKeys[i];
+ hRegionInfos[i] =
+ new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
+ false, regionId);
+ startKey = endKey;
+ }
+ }
+ return hRegionInfos;
+ }
+
/**
* Create new set of regions on the specified file-system.
* NOTE: you should add the regions to hbase:meta after this operation.
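A quick worked example of the boundary logic in createHRegionInfos() above: N split keys yield N + 1 regions, with the first start key and the last end key left open. The table name is hypothetical:

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    byte[][] splitKeys = new byte[][] {
        Bytes.toBytes("b"), Bytes.toBytes("m"), Bytes.toBytes("x")
    };
    // Resulting regions: (null,"b"), ["b","m"), ["m","x"), ["x",null)
    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
    assert regions.length == 4;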
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 8ed49ff..2c13f39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -63,6 +63,8 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
@@ -261,6 +263,11 @@ public class TestCatalogJanitor {
}
@Override
+ public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return null;
+ }
+
+ @Override
public ServerManager getServerManager() {
return null;
}
@@ -912,7 +919,7 @@ public class TestCatalogJanitor {
MasterServices services = new MockMasterServices(server);
// create the janitor
-
+
CatalogJanitor janitor = new CatalogJanitor(server, services);
// Create regions.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
new file mode 100644
index 0000000..d6c19e1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class MasterProcedureTestingUtility {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);
+
+ private MasterProcedureTestingUtility() {
+ }
+
+ public static HTableDescriptor createHTD(final TableName tableName, final String... family) {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ for (int i = 0; i < family.length; ++i) {
+ htd.addFamily(new HColumnDescriptor(family[i]));
+ }
+ return htd;
+ }
+
+ public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
+ final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
+ HTableDescriptor htd = createHTD(tableName, family);
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+ return regions;
+ }
+
+ public static void validateTableCreation(final HMaster master, final TableName tableName,
+ final HRegionInfo[] regions, String... family) throws IOException {
+ validateTableCreation(master, tableName, regions, true, family);
+ }
+
+ public static void validateTableCreation(final HMaster master, final TableName tableName,
+ final HRegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException {
+ // check filesystem
+ final FileSystem fs = master.getMasterFileSystem().getFileSystem();
+ final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+ assertTrue(fs.exists(tableDir));
+ List<Path> allRegionDirs = FSUtils.getRegionDirs(fs, tableDir);
+ for (int i = 0; i < regions.length; ++i) {
+ Path regionDir = new Path(tableDir, regions[i].getEncodedName());
+ assertTrue(regions[i] + " region dir does not exist", fs.exists(regionDir));
+ assertTrue(allRegionDirs.remove(regionDir));
+ List<Path> allFamilyDirs = FSUtils.getFamilyDirs(fs, regionDir);
+ for (int j = 0; j < family.length; ++j) {
+ final Path familyDir = new Path(regionDir, family[j]);
+ if (hasFamilyDirs) {
+ assertTrue(family[j] + " family dir does not exist", fs.exists(familyDir));
+ assertTrue(allFamilyDirs.remove(familyDir));
+ } else {
+ // TODO: WARN: Modify Table/Families does not create a family dir
+ if (!fs.exists(familyDir)) {
+ LOG.warn(family[j] + " family dir does not exist");
+ }
+ allFamilyDirs.remove(familyDir);
+ }
+ }
+ assertTrue("found extraneous families: " + allFamilyDirs, allFamilyDirs.isEmpty());
+ }
+ assertTrue("found extraneous regions: " + allRegionDirs, allRegionDirs.isEmpty());
+
+ // check meta
+ assertTrue(MetaTableAccessor.tableExists(master.getConnection(), tableName));
+ assertEquals(regions.length, countMetaRegions(master, tableName));
+
+ // check htd
+ TableDescriptor tableDesc = master.getTableDescriptors().getDescriptor(tableName);
+ assertTrue("table descriptor not found", tableDesc != null);
+ HTableDescriptor htd = tableDesc.getHTableDescriptor();
+ assertTrue("table descriptor not found", htd != null);
+ for (int i = 0; i < family.length; ++i) {
+ assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
+ }
+ assertEquals(family.length, htd.getFamilies().size());
+ }
+
+ public static void validateTableDeletion(final HMaster master, final TableName tableName,
+ final HRegionInfo[] regions, String... family) throws IOException {
+ // check filesystem
+ final FileSystem fs = master.getMasterFileSystem().getFileSystem();
+ final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+ assertFalse(fs.exists(tableDir));
+
+ // check meta
+ assertFalse(MetaTableAccessor.tableExists(master.getConnection(), tableName));
+ assertEquals(0, countMetaRegions(master, tableName));
+
+ // check htd
+ assertTrue("found htd of deleted table",
+ master.getTableDescriptors().getDescriptor(tableName) == null);
+ }
+
+ private static int countMetaRegions(final HMaster master, final TableName tableName)
+ throws IOException {
+ final AtomicInteger actualRegCount = new AtomicInteger(0);
+ final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+ @Override
+ public boolean visit(Result rowResult) throws IOException {
+ RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
+ if (list == null) {
+ LOG.warn("No serialized HRegionInfo in " + rowResult);
+ return true;
+ }
+ HRegionLocation l = list.getRegionLocation();
+ if (l == null) {
+ return true;
+ }
+ if (!l.getRegionInfo().getTable().equals(tableName)) {
+ return false;
+ }
+ if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
+ HRegionLocation[] locations = list.getRegionLocations();
+ for (HRegionLocation location : locations) {
+ if (location == null) continue;
+ ServerName serverName = location.getServerName();
+ // Make sure that regions are assigned to server
+ if (serverName != null && serverName.getHostAndPort() != null) {
+ actualRegCount.incrementAndGet();
+ }
+ }
+ return true;
+ }
+ };
+ MetaTableAccessor.scanMetaForTableRegions(master.getConnection(), visitor, tableName);
+ return actualRegCount.get();
+ }
+
+ public static <TState> void testRecoveryAndDoubleExecution(
+ final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
+ final int numSteps, final TState[] states) throws Exception {
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ assertEquals(false, procExec.isRunning());
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ for (int i = 0; i < numSteps; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + states[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ assertEquals(true, procExec.isRunning());
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ }
+
+ public static <TState> void testRollbackAndDoubleExecution(
+ final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
+ final int lastStep, final TState[] states) throws Exception {
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ for (int i = 0; i < lastStep; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + states[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+
+ // Restart the executor and rollback the step twice
+ // rollback step N - kill before store update
+ // restart executor/store
+ // rollback step N - save on store
+ MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener =
+ new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec);
+ procExec.registerListener(abortListener);
+ try {
+ for (int i = lastStep + 1; i >= 0; --i) {
+ LOG.info("Restart " + i +" rollback state: "+ states[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ } finally {
+ assertTrue(procExec.unregisterListener(abortListener));
+ }
+
+ ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId));
+ }
+
+ public static <TState> void testRollbackAndDoubleExecutionAfterPONR(
+ final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
+ final int lastStep, final TState[] states) throws Exception {
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ for (int i = 0; i < lastStep; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + states[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+
+ // try to inject the abort
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener =
+ new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec);
+ procExec.registerListener(abortListener);
+ try {
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ LOG.info("Restart and execute");
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ } finally {
+ assertTrue(procExec.unregisterListener(abortListener));
+ }
+
+ assertEquals(true, procExec.isRunning());
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ }
+
+ public static <TState> void testRollbackRetriableFailure(
+ final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
+ final int lastStep, final TState[] states) throws Exception {
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ for (int i = 0; i < lastStep; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + states[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+
+ // execute the rollback
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener =
+ new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec);
+ procExec.registerListener(abortListener);
+ try {
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ LOG.info("Restart and rollback");
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ } finally {
+ assertTrue(procExec.unregisterListener(abortListener));
+ }
+
+ ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId));
+ }
+
+ public static class InjectAbortOnLoadListener
+ implements ProcedureExecutor.ProcedureExecutorListener {
+ private final ProcedureExecutor<MasterProcedureEnv> procExec;
+
+ public InjectAbortOnLoadListener(final ProcedureExecutor<MasterProcedureEnv> procExec) {
+ this.procExec = procExec;
+ }
+
+ @Override
+ public void procedureLoaded(long procId) {
+ procExec.abort(procId);
+ }
+
+ @Override
+ public void procedureAdded(long procId) { /* no-op */ }
+
+ @Override
+ public void procedureFinished(long procId) { /* no-op */ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
new file mode 100644
index 0000000..7cd64b6
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -0,0 +1,257 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestCreateTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestCreateTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ resetProcExecutorTestingKillFlag();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ resetProcExecutorTestingKillFlag();
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ private void resetProcExecutorTestingKillFlag() {
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ assertTrue("expected executor to be running", procExec.isRunning());
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleCreate() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleCreate");
+ final byte[][] splitKeys = null;
+ testSimpleCreate(tableName, splitKeys);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleCreateWithSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleCreateWithSplits");
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ testSimpleCreate(tableName, splitKeys);
+ }
+
+ private void testSimpleCreate(final TableName tableName, byte[][] splitKeys) throws Exception {
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ @Test(timeout=60000, expected=TableExistsException.class)
+ public void testCreateExisting() throws Exception {
+ final TableName tableName = TableName.valueOf("testCreateExisting");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
+ final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
+
+ // create the table
+ long procId1 = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+
+ // create another with the same name
+ ProcedurePrepareLatch latch2 = new ProcedurePrepareLatch.CompatibilityLatch();
+ long procId2 = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions, latch2));
+
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
+
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ latch2.await();
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+
+ // create the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ byte[][] splitKeys = null;
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+
+ // Restart the executor and execute the step twice
+ // NOTE: the 6 (number of CreateTableState steps) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec, procId, 6, CreateTableState.values());
+
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ @Test(timeout=90000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+
+ // create the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ htd.setRegionReplication(3);
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+
+ // NOTE: the 4 (number of CreateTableState steps) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec, procId, 4, CreateTableState.values());
+
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+
+ // are we able to create the table after a rollback?
+ resetProcExecutorTestingKillFlag();
+ testSimpleCreate(tableName, splitKeys);
+ }
+
+ @Test(timeout=90000)
+ public void testRollbackRetriableFailure() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackRetriableFailure");
+
+ // create the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new FaultyCreateTableProcedure(procExec.getEnvironment(), htd, regions));
+
+ // NOTE: the 4 (number of CreateTableState steps) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ MasterProcedureTestingUtility.testRollbackRetriableFailure(
+ procExec, procId, 4, CreateTableState.values());
+
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+
+ // are we able to create the table after a rollback?
+ resetProcExecutorTestingKillFlag();
+ testSimpleCreate(tableName, splitKeys);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ public static class FaultyCreateTableProcedure extends CreateTableProcedure {
+ private int retries = 0;
+
+ public FaultyCreateTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ }
+
+ public FaultyCreateTableProcedure(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions)
+ throws IOException {
+ super(env, hTableDescriptor, newRegions);
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state)
+ throws IOException {
+ if (retries++ < 3) {
+ LOG.info("inject rollback failure state=" + state);
+ throw new IOException("injected failure number " + retries);
+ } else {
+ super.rollbackState(env, state);
+ retries = 0;
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
new file mode 100644
index 0000000..6795b22
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestDeleteTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestDeleteTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ assertTrue("expected executor to be running", procExec.isRunning());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout=60000, expected=TableNotFoundException.class)
+ public void testDeleteNotExistentTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteNotExistentTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedurePrepareLatch latch = new ProcedurePrepareLatch.CompatibilityLatch();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch));
+ latch.await();
+ }
+
+ @Test(timeout=60000, expected=TableNotDisabledException.class)
+ public void testDeleteNotDisabledTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteNotDisabledTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
+
+ ProcedurePrepareLatch latch = new ProcedurePrepareLatch.CompatibilityLatch();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch));
+ latch.await();
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteDeletedTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteDeletedTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "f");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // delete the table (that exists)
+ long procId1 = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ // delete the table (that will no longer exist)
+ long procId2 = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+
+ // First delete should succeed
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");
+
+ // Second delete should fail with TableNotFound
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotFoundException);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleDelete() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleDelete");
+ final byte[][] splitKeys = null;
+ testSimpleDelete(tableName, splitKeys);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleDeleteWithSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleDeleteWithSplits");
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ testSimpleDelete(tableName, splitKeys);
+ }
+
+ private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) throws Exception {
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // delete the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+
+ // create the table
+ byte[][] splitKeys = null;
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+
+ // Restart the executor and re-execute each step
+ // NOTE: the 6 (number of DeleteTableState steps) is hardcoded,
+ // so this test must be revisited whenever a new step is added.
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec, procId, 6, DeleteTableState.values());
+
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
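A usage note on the hardcoded step count above: it could instead be derived from the enum, assuming every DeleteTableState value maps to exactly one executed step. A minimal sketch, not part of this patch:

    // Sketch only: derive the step count from the enum rather than
    // hardcoding 6; assumes one executed step per DeleteTableState value.
    DeleteTableState[] states = DeleteTableState.values();
    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
        procExec, procId, states.length, states);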
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
new file mode 100644
index 0000000..faf7845
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, LargeTests.class})
+public class TestMasterFailoverWithProcedures {
+ private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ }
+
+ @Before
+ public void setup() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(2, 1);
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Test(timeout=60000)
+ public void testWalRecoverLease() throws Exception {
+ final ProcedureStore masterStore = getMasterProcedureExecutor().getStore();
+ assertTrue("expected WALStore for this test", masterStore instanceof WALProcedureStore);
+
+ HMaster firstMaster = UTIL.getHBaseCluster().getMaster();
+ // Abort Latch for the master store
+ final CountDownLatch masterStoreAbort = new CountDownLatch(1);
+ masterStore.registerListener(new ProcedureStore.ProcedureStoreListener() {
+ @Override
+ public void abortProcess() {
+ LOG.debug("Abort store of Master");
+ masterStoreAbort.countDown();
+ }
+ });
+
+ // Start up a fake master: the new WAL store will take the lease
+ // and the active master should abort.
+ HMaster backupMaster3 = Mockito.mock(HMaster.class);
+ Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
+ Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
+ final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(),
+ firstMaster.getMasterFileSystem().getFileSystem(),
+ ((WALProcedureStore)masterStore).getLogDir(),
+ new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));
+ // Abort Latch for the test store
+ final CountDownLatch backupStore3Abort = new CountDownLatch(1);
+ backupStore3.registerListener(new ProcedureStore.ProcedureStoreListener() {
+ @Override
+ public void abortProcess() {
+ LOG.debug("Abort store of backupMaster3");
+ backupStore3Abort.countDown();
+ backupStore3.stop(true);
+ }
+ });
+ backupStore3.start(1);
+ backupStore3.recoverLease();
+
+ // Try to trigger a command on the master (WAL lease expired on the active one)
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("mtb"), "f");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
+ LOG.debug("submit proc");
+ getMasterProcedureExecutor().submitProcedure(
+ new CreateTableProcedure(getMasterProcedureExecutor().getEnvironment(), htd, regions));
+ LOG.debug("wait master store abort");
+ masterStoreAbort.await();
+
+ // Now the real backup master should start up
+ LOG.debug("wait backup master to startup");
+ waitBackupMaster(UTIL, firstMaster);
+ assertEquals(true, firstMaster.isStopped());
+
+ // wait here for the store to abort (the test will fail on timeout if it doesn't)
+ LOG.debug("wait the store to abort");
+ backupStore3.getStoreTracker().setDeleted(1, false);
+ backupStore3.delete(1);
+ backupStore3Abort.await();
+ }
+
+ // ==========================================================================
+ // Test Create Table
+ // ==========================================================================
+ @Test(timeout=60000)
+ public void testCreateWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // This is already covered by TestCreateTableProcedure,
+ // but there only the executor/store is restarted, not the master itself.
+ // Without a master restart we may miss bugs in the procedure code,
+ // such as a missing "wait" for resources to be available (e.g. RS).
+ testCreateWithFailoverAtStep(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS.ordinal());
+ }
+
+ private void testCreateWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);
+
+ // create the table
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ byte[][] splitKeys = null;
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values());
+
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ // ==========================================================================
+ // Test Delete Table
+ // ==========================================================================
+ @Test(timeout=60000)
+ public void testDeleteWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // This is already covered by TestDeleteTableProcedure,
+ // but there only the executor/store is restarted, not the master itself.
+ // Without a master restart we may miss bugs in the procedure code,
+ // such as a missing "wait" for resources to be available (e.g. RS).
+ testDeleteWithFailoverAtStep(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS.ordinal());
+ }
+
+ private void testDeleteWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteWithFailoverAtStep" + step);
+
+ // create the table
+ byte[][] splitKeys = null;
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ Path tableDir = FSUtils.getTableDir(getRootDir(), tableName);
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, DeleteTableState.values());
+
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ // ==========================================================================
+ // Test Helpers
+ // ==========================================================================
+ public static <TState> void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil,
+ final long procId, final int lastStepBeforeFailover, TState[] states) throws Exception {
+ ProcedureExecutor<MasterProcedureEnv> procExec =
+ testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ for (int i = 0; i < lastStepBeforeFailover; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + states[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+
+ LOG.info("Trigger master failover");
+ masterFailover(testUtil);
+
+ procExec = testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ }
+
+ // ==========================================================================
+ // Master failover utils
+ // ==========================================================================
+ public static void masterFailover(final HBaseTestingUtility testUtil)
+ throws Exception {
+ MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
+
+ // Kill the master
+ HMaster oldMaster = cluster.getMaster();
+ cluster.killMaster(cluster.getMaster().getServerName());
+
+ // Wait for the secondary to take over
+ waitBackupMaster(testUtil, oldMaster);
+ }
+
+ public static void waitBackupMaster(final HBaseTestingUtility testUtil,
+ final HMaster oldMaster) throws Exception {
+ MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
+
+ HMaster newMaster = cluster.getMaster();
+ while (newMaster == null || newMaster == oldMaster) {
+ Thread.sleep(250);
+ newMaster = cluster.getMaster();
+ }
+
+ while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) {
+ Thread.sleep(250);
+ }
+ }
+
+ // ==========================================================================
+ // Helpers
+ // ==========================================================================
+ private MasterProcedureEnv getMasterProcedureEnv() {
+ return getMasterProcedureExecutor().getEnvironment();
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ private FileSystem getFileSystem() {
+ return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ }
+
+ private Path getRootDir() {
+ return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ }
+
+ private Path getTempDir() {
+ return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getTempDir();
+ }
+}
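The failover helpers above (masterFailover, waitBackupMaster) are public so other tests can reuse them. A hedged sketch of the intended call pattern, assuming a non-final procExec local and a disabled table as in the tests above; the key point is that the executor must be re-fetched from the new active master after failover:

    // Hypothetical caller, sketch only (not part of this patch).
    long procId = procExec.submitProcedure(
        new DeleteTableProcedure(procExec.getEnvironment(), tableName));
    TestMasterFailoverWithProcedures.masterFailover(UTIL);
    // The old executor belonged to the killed master; fetch the new one.
    procExec = UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
    ProcedureTestingUtility.waitProcedure(procExec, procId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);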
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java
new file mode 100644
index 0000000..d22930f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java
@@ -0,0 +1,433 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestMasterProcedureQueue {
+ private static final Log LOG = LogFactory.getLog(TestMasterProcedureQueue.class);
+
+ private MasterProcedureQueue queue;
+ private Configuration conf;
+
+ @Before
+ public void setUp() throws IOException {
+ conf = HBaseConfiguration.create();
+ queue = new MasterProcedureQueue(conf, new TableLockManager.NullTableLockManager());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ assertEquals(0, queue.size());
+ }
+
+ /**
+ * Verify simple create/insert/fetch/delete of the table queue.
+ */
+ @Test
+ public void testSimpleTableOpsQueues() throws Exception {
+ final int NUM_TABLES = 10;
+ final int NUM_ITEMS = 10;
+
+ int count = 0;
+ for (int i = 1; i <= NUM_TABLES; ++i) {
+ TableName tableName = TableName.valueOf(String.format("test-%04d", i));
+ // insert items
+ for (int j = 1; j <= NUM_ITEMS; ++j) {
+ queue.addBack(new TestTableProcedure(i * 1000 + j, tableName,
+ TableProcedureInterface.TableOperationType.EDIT));
+ assertEquals(++count, queue.size());
+ }
+ }
+ assertEquals(NUM_TABLES * NUM_ITEMS, queue.size());
+
+ for (int j = 1; j <= NUM_ITEMS; ++j) {
+ for (int i = 1; i <= NUM_TABLES; ++i) {
+ Long procId = queue.poll();
+ assertEquals(--count, queue.size());
+ assertEquals(i * 1000 + j, procId.longValue());
+ }
+ }
+ assertEquals(0, queue.size());
+
+ for (int i = 1; i <= NUM_TABLES; ++i) {
+ TableName tableName = TableName.valueOf(String.format("test-%04d", i));
+ // complete the table deletion
+ assertTrue(queue.markTableAsDeleted(tableName));
+ }
+ }
+
+ /**
+ * Check that the table queue is not deletable until every procedure
+ * in-progress is completed (this is a special case for write-locks).
+ */
+ @Test
+ public void testCreateDeleteTableOperationsWithWriteLock() throws Exception {
+ TableName tableName = TableName.valueOf("testtb");
+
+ queue.addBack(new TestTableProcedure(1, tableName,
+ TableProcedureInterface.TableOperationType.EDIT));
+
+ // table can't be deleted because one item is in the queue
+ assertFalse(queue.markTableAsDeleted(tableName));
+
+ // fetch item and take a lock
+ assertEquals(1, queue.poll().longValue());
+ // take the xlock
+ assertTrue(queue.tryAcquireTableWrite(tableName, "write"));
+ // table can't be deleted because we have the lock
+ assertEquals(0, queue.size());
+ assertFalse(queue.markTableAsDeleted(tableName));
+ // release the xlock
+ queue.releaseTableWrite(tableName);
+ // complete the table deletion
+ assertTrue(queue.markTableAsDeleted(tableName));
+ }
+
+ /**
+ * Check that the table queue is not deletable until every procedure
+ * in-progress is completed (this is a special case for read-locks).
+ */
+ @Test
+ public void testCreateDeleteTableOperationsWithReadLock() throws Exception {
+ final TableName tableName = TableName.valueOf("testtb");
+ final int nitems = 2;
+
+ for (int i = 1; i <= nitems; ++i) {
+ queue.addBack(new TestTableProcedure(i, tableName,
+ TableProcedureInterface.TableOperationType.READ));
+ }
+
+ // table can't be deleted because there are items in the queue
+ assertFalse(queue.markTableAsDeleted(tableName));
+
+ for (int i = 1; i <= nitems; ++i) {
+ // fetch item and take a lock
+ assertEquals(i, queue.poll().longValue());
+ // take the rlock
+ assertTrue(queue.tryAcquireTableRead(tableName, "read " + i));
+ // table can't be deleted because we have locks and/or items in the queue
+ assertFalse(queue.markTableAsDeleted(tableName));
+ }
+
+ for (int i = 1; i <= nitems; ++i) {
+ // table can't be deleted because we have locks
+ assertFalse(queue.markTableAsDeleted(tableName));
+ // release the rlock
+ queue.releaseTableRead(tableName);
+ }
+
+ // there are no items and no locks in the queue
+ assertEquals(0, queue.size());
+ // complete the table deletion
+ assertTrue(queue.markTableAsDeleted(tableName));
+ }
+
+ /**
+ * Verify the correct logic of RWLocks on the queue
+ */
+ @Test
+ public void testVerifyRwLocks() throws Exception {
+ TableName tableName = TableName.valueOf("testtb");
+ queue.addBack(new TestTableProcedure(1, tableName,
+ TableProcedureInterface.TableOperationType.EDIT));
+ queue.addBack(new TestTableProcedure(2, tableName,
+ TableProcedureInterface.TableOperationType.READ));
+ queue.addBack(new TestTableProcedure(3, tableName,
+ TableProcedureInterface.TableOperationType.EDIT));
+ queue.addBack(new TestTableProcedure(4, tableName,
+ TableProcedureInterface.TableOperationType.READ));
+ queue.addBack(new TestTableProcedure(5, tableName,
+ TableProcedureInterface.TableOperationType.READ));
+
+ // Fetch the 1st item and take the write lock
+ Long procId = queue.poll();
+ assertEquals(1, procId.longValue());
+ assertEquals(true, queue.tryAcquireTableWrite(tableName, "write " + procId));
+
+ // The write lock is still held, so the 2nd item can't be polled yet
+ assertEquals(null, queue.poll());
+
+ // Release the write lock and acquire the read lock
+ queue.releaseTableWrite(tableName);
+
+ // Fetch the 2nd item and take the read lock
+ procId = queue.poll();
+ assertEquals(2, procId.longValue());
+ assertEquals(true, queue.tryAcquireTableRead(tableName, "read " + procId));
+
+ // Fetch the 3rd item and verify that the lock can't be acquired
+ procId = queue.poll();
+ assertEquals(3, procId.longValue());
+ assertEquals(false, queue.tryAcquireTableWrite(tableName, "write " + procId));
+
+ // release the read lock of item 2 and take the write lock for the 3rd item
+ queue.releaseTableRead(tableName);
+ assertEquals(true, queue.tryAcquireTableWrite(tableName, "write " + procId));
+
+ // The write lock is still held, so the 4th item can't be polled yet
+ assertEquals(null, queue.poll());
+
+ // Release the write lock and acquire the read lock
+ queue.releaseTableWrite(tableName);
+
+ // Fetch the 4th item and take the read lock
+ procId = queue.poll();
+ assertEquals(4, procId.longValue());
+ assertEquals(true, queue.tryAcquireTableRead(tableName, "read " + procId));
+
+ // Fetch the 5th item and take the read lock
+ procId = queue.poll();
+ assertEquals(5, procId.longValue());
+ assertEquals(true, queue.tryAcquireTableRead(tableName, "read " + procId));
+
+ // Release 4th and 5th read-lock
+ queue.releaseTableRead(tableName);
+ queue.releaseTableRead(tableName);
+
+ // remove table queue
+ assertEquals(0, queue.size());
+ assertTrue("queue should be deleted", queue.markTableAsDeleted(tableName));
+ }
+
+ /**
+ * Verify that "write" operations for a single table are serialized,
+ * but different tables can be executed in parallel.
+ */
+ @Test(timeout=90000)
+ public void testConcurrentWriteOps() throws Exception {
+ final TestTableProcSet procSet = new TestTableProcSet(queue);
+
+ final int NUM_ITEMS = 10;
+ final int NUM_TABLES = 4;
+ final AtomicInteger opsCount = new AtomicInteger(0);
+ for (int i = 0; i < NUM_TABLES; ++i) {
+ TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
+ for (int j = 1; j < NUM_ITEMS; ++j) {
+ procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
+ TableProcedureInterface.TableOperationType.EDIT));
+ opsCount.incrementAndGet();
+ }
+ }
+ assertEquals(opsCount.get(), queue.size());
+
+ final Thread[] threads = new Thread[NUM_TABLES * 2];
+ final HashSet<TableName> concurrentTables = new HashSet<TableName>();
+ final ArrayList<String> failures = new ArrayList<String>();
+ final AtomicInteger concurrentCount = new AtomicInteger(0);
+ for (int i = 0; i < threads.length; ++i) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ while (opsCount.get() > 0) {
+ try {
+ TableProcedureInterface proc = procSet.acquire();
+ if (proc == null) {
+ queue.signalAll();
+ if (opsCount.get() > 0) {
+ continue;
+ }
+ break;
+ }
+ synchronized (concurrentTables) {
+ assertTrue("unexpected concurrency on " + proc.getTableName(),
+ concurrentTables.add(proc.getTableName()));
+ }
+ assertTrue(opsCount.decrementAndGet() >= 0);
+ try {
+ long procId = ((Procedure)proc).getProcId();
+ TableName tableId = proc.getTableName();
+ int concurrent = concurrentCount.incrementAndGet();
+ assertTrue("inc-concurrent="+ concurrent +" 1 <= concurrent <= "+ NUM_TABLES,
+ concurrent >= 1 && concurrent <= NUM_TABLES);
+ LOG.debug("[S] tableId="+ tableId +" procId="+ procId +" concurrent="+ concurrent);
+ Thread.sleep(2000);
+ concurrent = concurrentCount.decrementAndGet();
+ LOG.debug("[E] tableId="+ tableId +" procId="+ procId +" concurrent="+ concurrent);
+ assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
+ } finally {
+ synchronized (concurrentTables) {
+ assertTrue(concurrentTables.remove(proc.getTableName()));
+ }
+ procSet.release(proc);
+ }
+ } catch (Throwable e) {
+ LOG.error("Failed " + e.getMessage(), e);
+ synchronized (failures) {
+ failures.add(e.getMessage());
+ }
+ } finally {
+ queue.signalAll();
+ }
+ }
+ }
+ };
+ threads[i].start();
+ }
+ for (int i = 0; i < threads.length; ++i) {
+ threads[i].join();
+ }
+ assertTrue(failures.toString(), failures.isEmpty());
+ assertEquals(0, opsCount.get());
+ assertEquals(0, queue.size());
+
+ for (int i = 0; i < NUM_TABLES; ++i) {
+ TableName table = TableName.valueOf(String.format("testtb-%04d", i));
+ assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
+ }
+ }
+
+ public static class TestTableProcSet {
+ private final MasterProcedureQueue queue;
+ private Map<Long, TableProcedureInterface> procsMap =
+ new ConcurrentHashMap<Long, TableProcedureInterface>();
+
+ public TestTableProcSet(final MasterProcedureQueue queue) {
+ this.queue = queue;
+ }
+
+ public void addBack(TableProcedureInterface tableProc) {
+ Procedure proc = (Procedure)tableProc;
+ procsMap.put(proc.getProcId(), tableProc);
+ queue.addBack(proc);
+ }
+
+ public void addFront(TableProcedureInterface tableProc) {
+ Procedure proc = (Procedure)tableProc;
+ procsMap.put(proc.getProcId(), tableProc);
+ queue.addFront(proc);
+ }
+
+ public TableProcedureInterface acquire() {
+ TableProcedureInterface proc = null;
+ boolean avail = false;
+ while (!avail) {
+ Long procId = queue.poll();
+ proc = procId != null ? procsMap.remove(procId) : null;
+ if (proc == null) break;
+ switch (proc.getTableOperationType()) {
+ case CREATE:
+ case DELETE:
+ case EDIT:
+ avail = queue.tryAcquireTableWrite(proc.getTableName(),
+ "op="+ proc.getTableOperationType());
+ break;
+ case READ:
+ avail = queue.tryAcquireTableRead(proc.getTableName(),
+ "op="+ proc.getTableOperationType());
+ break;
+ }
+ if (!avail) {
+ addFront(proc);
+ LOG.debug("yield procId=" + procId);
+ }
+ }
+ return proc;
+ }
+
+ public void release(TableProcedureInterface proc) {
+ switch (proc.getTableOperationType()) {
+ case CREATE:
+ case DELETE:
+ case EDIT:
+ queue.releaseTableWrite(proc.getTableName());
+ break;
+ case READ:
+ queue.releaseTableRead(proc.getTableName());
+ break;
+ }
+ }
+ }
+
+ public static class TestTableProcedure extends Procedure<Void>
+ implements TableProcedureInterface {
+ private final TableOperationType opType;
+ private final TableName tableName;
+
+ public TestTableProcedure() {
+ throw new UnsupportedOperationException("recovery should not be triggered here");
+ }
+
+ public TestTableProcedure(long procId, TableName tableName, TableOperationType opType) {
+ this.tableName = tableName;
+ this.opType = opType;
+ setProcId(procId);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return opType;
+ }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ return null;
+ }
+
+ @Override
+ protected void rollback(Void env) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected boolean abort(Void env) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected void serializeStateData(final OutputStream stream) throws IOException {}
+
+ @Override
+ protected void deserializeStateData(final InputStream stream) throws IOException {}
+ }
+}
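All of the tests above exercise the same queue lifecycle. Reduced to a minimal sketch (assumptions: a single table, one EDIT procedure, and the MasterProcedureQueue API exactly as introduced by this patch):

    // Lifecycle sketch: enqueue, poll, lock, release, remove the table queue.
    queue.addBack(new TestTableProcedure(1, tableName,
        TableProcedureInterface.TableOperationType.EDIT));
    Long procId = queue.poll();                        // fetch the runnable proc
    queue.tryAcquireTableWrite(tableName, "op=EDIT");  // take the exclusive lock
    // ... execute the procedure step ...
    queue.releaseTableWrite(tableName);                // give the lock back
    queue.markTableAsDeleted(tableName);               // table queue is now removable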
[12/50] [abbrv] hbase git commit: HBASE-13203 Procedure v2 - master
create/delete table
Posted by jm...@apache.org.
HBASE-13203 Procedure v2 - master create/delete table
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5f1f98a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5f1f98a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5f1f98a
Branch: refs/heads/hbase-11339
Commit: b5f1f98a2500dc8621a7e57995cec9f37c4d1438
Parents: 04246c6
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 20:47:46 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:42 2015 +0100
----------------------------------------------------------------------
.../apache/hadoop/hbase/MetaTableAccessor.java | 4 +-
.../hbase/exceptions/TimeoutIOException.java | 46 +
hbase-protocol/pom.xml | 1 +
.../generated/MasterProcedureProtos.java | 2633 ++++++++++++++++++
.../src/main/protobuf/MasterProcedure.proto | 74 +
hbase-server/pom.xml | 10 +
.../apache/hadoop/hbase/ipc/RpcCallContext.java | 6 +
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 15 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 106 +-
.../hadoop/hbase/master/MasterServices.java | 7 +
.../hbase/master/TableNamespaceManager.java | 19 +-
.../master/procedure/CreateTableProcedure.java | 442 +++
.../master/procedure/DeleteTableProcedure.java | 420 +++
.../procedure/MasterProcedureConstants.java | 31 +
.../master/procedure/MasterProcedureEnv.java | 123 +
.../master/procedure/MasterProcedureQueue.java | 448 +++
.../master/procedure/MasterProcedureUtil.java | 56 +
.../master/procedure/ProcedurePrepareLatch.java | 105 +
.../master/procedure/ProcedureSyncWait.java | 179 ++
.../procedure/TableProcedureInterface.java | 46 +
.../hadoop/hbase/quotas/MasterQuotaManager.java | 15 +-
.../hbase/regionserver/HRegionServer.java | 9 +-
.../hadoop/hbase/util/ModifyRegionUtils.java | 24 +
.../hadoop/hbase/master/TestCatalogJanitor.java | 9 +-
.../MasterProcedureTestingUtility.java | 317 +++
.../procedure/TestCreateTableProcedure.java | 257 ++
.../procedure/TestDeleteTableProcedure.java | 208 ++
.../TestMasterFailoverWithProcedures.java | 291 ++
.../procedure/TestMasterProcedureQueue.java | 433 +++
29 files changed, 6280 insertions(+), 54 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index d18239b..ea29e4f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -249,8 +249,10 @@ public class MetaTableAccessor {
static Table getMetaHTable(final Connection connection)
throws IOException {
// We used to pass whole CatalogTracker in here, now we just pass in Connection
- if (connection == null || connection.isClosed()) {
+ if (connection == null) {
throw new NullPointerException("No connection");
+ } else if (connection.isClosed()) {
+ throw new IOException("connection is closed");
}
return connection.getTable(TableName.META_TABLE_NAME);
}
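With this change a caller can distinguish a programming error (null connection, NullPointerException) from a recoverable runtime condition (connection closed underneath it, IOException). A hypothetical caller-side sketch, not from this patch; since getMetaHTable is package-private, this assumes a caller in the same package:

    try {
      Table meta = MetaTableAccessor.getMetaHTable(connection);
      // ... read/write hbase:meta ...
    } catch (IOException e) {
      // the connection was closed; reopen it and retry
    }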
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java
new file mode 100644
index 0000000..4e1ee39
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.exceptions;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Exception thrown when a blocking operation times out.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+public class TimeoutIOException extends IOException {
+ public TimeoutIOException() {
+ super();
+ }
+
+ public TimeoutIOException(final String message) {
+ super(message);
+ }
+
+ public TimeoutIOException(final String message, final Throwable t) {
+ super(message, t);
+ }
+
+ public TimeoutIOException(final Throwable t) {
+ super(t);
+ }
+}
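A typical use for the new exception, shown as a hypothetical sketch (latch and timeoutMs are illustrative, not from this patch): turn a timed-out blocking wait into an IOException-compatible failure.

    // Assumes java.util.concurrent.CountDownLatch and TimeUnit are imported.
    if (!latch.await(timeoutMs, TimeUnit.MILLISECONDS)) {
      throw new TimeoutIOException("wait timed out after " + timeoutMs + " ms");
    }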
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-protocol/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 0d33332..fb5e0ab 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -175,6 +175,7 @@
<include>LoadBalancer.proto</include>
<include>MapReduce.proto</include>
<include>Master.proto</include>
+ <include>MasterProcedure.proto</include>
<include>MultiRowMutation.proto</include>
<include>Procedure.proto</include>
<include>Quota.proto</include>
[02/50] [abbrv] hbase git commit: HBASE-13423 Remove old duplicate
entry for hbase.regionserver.regionSplitLimit.
Posted by jm...@apache.org.
HBASE-13423 Remove old duplicate entry for hbase.regionserver.regionSplitLimit.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d20c08ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d20c08ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d20c08ea
Branch: refs/heads/hbase-11339
Commit: d20c08ea5891c22e9f5b6c11b0c6f1e12f2a0f08
Parents: 3cd929e
Author: Apekshit(Appy) Sharma <ap...@cloudera.com>
Authored: Wed Apr 8 00:34:01 2015 -0700
Committer: Misty Stanley-Jones <ms...@cloudera.com>
Committed: Fri Apr 10 20:40:15 2015 +1000
----------------------------------------------------------------------
hbase-common/src/main/resources/hbase-default.xml | 8 --------
1 file changed, 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d20c08ea/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index c51ba16..ff4136b 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -240,14 +240,6 @@ possible configurations would overwhelm and obscure the important.
in milliseconds.</description>
</property>
<property>
- <name>hbase.regionserver.regionSplitLimit</name>
- <value>2147483647</value>
- <description>Limit for the number of regions after which no more region
- splitting should take place. This is not a hard limit for the number of
- regions but acts as a guideline for the regionserver to stop splitting after
- a certain limit. Default is MAX_INT; i.e. do not block splitting.</description>
- </property>
- <property>
<name>hbase.regionserver.logroll.period</name>
<value>3600000</value>
<description>Period at which we will roll the commit log regardless
[43/50] [abbrv] hbase git commit: HBASE-13350 Log warnings for sanity
check failures when checks disabled.
Posted by jm...@apache.org.
HBASE-13350 Log warnings for sanity check failures when checks disabled.
Signed-off-by: Matteo Bertozzi <ma...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ddab4726
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ddab4726
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ddab4726
Branch: refs/heads/hbase-11339
Commit: ddab4726f6675943fc31b697a3d974bab74cd9ec
Parents: 682a29a
Author: Matt Warhaftig <mw...@gmail.com>
Authored: Wed Apr 15 00:29:34 2015 -0400
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Thu Apr 16 09:51:47 2015 +0100
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/master/HMaster.java | 64 +++++++++++---------
.../hadoop/hbase/client/TestFromClientSide.java | 41 +++++++++++++
2 files changed, 78 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/ddab4726/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index bcc43f4..9bd1dbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1373,12 +1373,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
*/
private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
final String CONF_KEY = "hbase.table.sanity.checks";
+ boolean logWarn = false;
if (!conf.getBoolean(CONF_KEY, true)) {
- return;
+ logWarn = true;
}
String tableVal = htd.getConfigurationValue(CONF_KEY);
if (tableVal != null && !Boolean.valueOf(tableVal)) {
- return;
+ logWarn = true;
}
// check max file size
@@ -1388,11 +1389,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
}
if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
- throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or "
- + "\"hbase.hregion.max.filesize\" (" + maxFileSize
- + ") is too small, which might cause over splitting into unmanageable "
- + "number of regions. Set " + CONF_KEY + " to false at conf or table descriptor "
- + "if you want to bypass sanity checks");
+ String message = "MAX_FILESIZE for table descriptor or "
+ + "\"hbase.hregion.max.filesize\" (" + maxFileSize
+ + ") is too small, which might cause over splitting into unmanageable "
+ + "number of regions.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
// check flush size
@@ -1402,72 +1403,81 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
}
if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
- throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or "
+ String message = "MEMSTORE_FLUSHSIZE for table descriptor or "
+ "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause"
- + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor "
- + "if you want to bypass sanity checks");
+ + " very frequent flushing.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
// check that coprocessors and other specified plugin classes can be loaded
try {
checkClassLoading(conf, htd);
} catch (Exception ex) {
- throw new DoNotRetryIOException(ex);
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, ex.getMessage(), null);
}
// check compression can be loaded
try {
checkCompression(htd);
} catch (IOException e) {
- throw new DoNotRetryIOException(e.getMessage(), e);
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
}
// check encryption can be loaded
try {
checkEncryption(conf, htd);
} catch (IOException e) {
- throw new DoNotRetryIOException(e.getMessage(), e);
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
}
// check that we have at least 1 CF
if (htd.getColumnFamilies().length == 0) {
- throw new DoNotRetryIOException("Table should have at least one column family "
- + "Set "+CONF_KEY+" at conf or table descriptor if you want to bypass sanity checks");
+ String message = "Table should have at least one column family.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.getTimeToLive() <= 0) {
- throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString()
- + " must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
- + "if you want to bypass sanity checks");
+ String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
// check blockSize
if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
- throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString()
- + " must be between 1K and 16MB Set "+CONF_KEY+" to false at conf or table descriptor "
- + "if you want to bypass sanity checks");
+ String message = "Block size for column family " + hcd.getNameAsString()
+ + " must be between 1K and 16MB.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
// check versions
if (hcd.getMinVersions() < 0) {
- throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString()
- + " must be positive. Set " + CONF_KEY + " to false at conf or table descriptor "
- + "if you want to bypass sanity checks");
+ String message = "Min versions for column family " + hcd.getNameAsString()
+ + " must be positive.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
// max versions already being checked
// check replication scope
if (hcd.getScope() < 0) {
- throw new DoNotRetryIOException("Replication scope for column family "
- + hcd.getNameAsString() + " must be positive. Set " + CONF_KEY + " to false at conf "
- + "or table descriptor if you want to bypass sanity checks");
+ String message = "Replication scope for column family "
+ + hcd.getNameAsString() + " must be positive.";
+ warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
// TODO: should we check coprocessors and encryption ?
}
}
+ // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled.
+ private static void warnOrThrowExceptionForFailure(boolean logWarn, String confKey,
+ String message, Exception cause) throws IOException {
+ if (!logWarn) {
+ throw new DoNotRetryIOException(message + " Set " + confKey +
+ " to false at conf or table descriptor if you want to bypass sanity checks", cause);
+ }
+ LOG.warn(message);
+ }
+
private void startActiveMasterManager(int infoPort) throws KeeperException {
String backupZNode = ZKUtil.joinZNode(
zooKeeper.backupMasterAddressesZNode, serverName.toString());
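For quick reference, the two bypass switches checked above, both of whose keys appear in this patch (conf and htd stand for the server Configuration and a table descriptor):

    // Cluster-wide: downgrade sanity-check failures to warnings.
    conf.setBoolean("hbase.table.sanity.checks", false);

    // Per table: same effect, scoped to one descriptor.
    htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());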
http://git-wip-us.apache.org/repos/asf/hbase/blob/ddab4726/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 5dba49c..e337ce2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -43,6 +43,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
+import org.apache.log4j.Level;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
@@ -99,6 +101,9 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -5438,8 +5443,44 @@ public class TestFromClientSide {
// check the conf settings to disable sanity checks
htd.setMemStoreFlushSize(0);
+
+ // Check that logs warn on invalid table but allow it.
+ ListAppender listAppender = new ListAppender();
+ Logger log = Logger.getLogger(HMaster.class);
+ log.addAppender(listAppender);
+ log.setLevel(Level.WARN);
+
htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
checkTableIsLegal(htd);
+
+ assertFalse(listAppender.getMessages().isEmpty());
+ assertTrue(listAppender.getMessages().get(0).startsWith("MEMSTORE_FLUSHSIZE for table "
+ + "descriptor or \"hbase.hregion.memstore.flush.size\" (0) is too small, which might "
+ + "cause very frequent flushing."));
+
+ log.removeAppender(listAppender);
+ }
+
+ private static class ListAppender extends AppenderSkeleton {
+ private final List<String> messages = new ArrayList<String>();
+
+ @Override
+ protected void append(LoggingEvent event) {
+ messages.add(event.getMessage().toString());
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ public List<String> getMessages() {
+ return messages;
+ }
}
private void checkTableIsLegal(HTableDescriptor htd) throws IOException {
[28/50] [abbrv] hbase git commit: HBASE-13209 Procedure V2 - master
Add/Modify/Delete Column Family (addendum)
Posted by jm...@apache.org.
HBASE-13209 Procedure V2 - master Add/Modify/Delete Column Family (addendum)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e994b491
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e994b491
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e994b491
Branch: refs/heads/hbase-11339
Commit: e994b491aca8ab2edeb60a328c690ddbc88f8b51
Parents: 1890bff
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Fri Apr 10 23:11:40 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 23:11:40 2015 +0100
----------------------------------------------------------------------
.../hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java | 2 --
.../hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java | 2 --
.../hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java | 2 --
.../apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java | 2 --
4 files changed, 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e994b491/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
index 98a00c2..377ccb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;
import org.apache.hadoop.security.UserGroupInformation;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e994b491/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
index a053c89..6e96910 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState;
import org.apache.hadoop.hbase.util.ByteStringer;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e994b491/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
index 138ebd8..5aa04db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
import org.apache.hadoop.security.UserGroupInformation;
http://git-wip-us.apache.org/repos/asf/hbase/blob/e994b491/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 8ccdaa4..a082ea4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@@ -47,7 +46,6 @@ import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
[40/50] [abbrv] hbase git commit: HBASE-12006 [JDK 8]
KeyStoreTestUtil#generateCertificate fails due to "subject class type
invalid"
Posted by jm...@apache.org.
HBASE-12006 [JDK 8] KeyStoreTestUtil#generateCertificate fails due to "subject class type invalid"
This is a port of the fix from HADOOP-10847
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2da1bf10
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2da1bf10
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2da1bf10
Branch: refs/heads/hbase-11339
Commit: 2da1bf10b8d83b51228f76a0603394a4a5ae03cb
Parents: d314f7d
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed Apr 15 09:47:34 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed Apr 15 09:47:34 2015 -0700
----------------------------------------------------------------------
hbase-server/pom.xml | 5 ++
.../hadoop/hbase/http/ssl/KeyStoreTestUtil.java | 69 +++++++-------------
pom.xml | 7 ++
3 files changed, 35 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/2da1bf10/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 107480a..4becc40 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -511,6 +511,11 @@
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<profiles>
<!-- Skip the tests in this module -->
http://git-wip-us.apache.org/repos/asf/hbase/blob/2da1bf10/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java
index 248b820..8668738 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java
@@ -26,38 +26,32 @@ import java.io.Writer;
import java.math.BigInteger;
import java.net.URL;
import java.security.GeneralSecurityException;
+import java.security.InvalidKeyException;
import java.security.Key;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.KeyStore;
import java.security.NoSuchAlgorithmException;
-import java.security.PrivateKey;
+import java.security.NoSuchProviderException;
import java.security.SecureRandom;
+import java.security.SignatureException;
import java.security.cert.Certificate;
+import java.security.cert.CertificateEncodingException;
import java.security.cert.X509Certificate;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
+import javax.security.auth.x500.X500Principal;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory;
import org.apache.hadoop.security.ssl.SSLFactory;
-
-import sun.security.x509.AlgorithmId;
-import sun.security.x509.CertificateAlgorithmId;
-import sun.security.x509.CertificateIssuerName;
-import sun.security.x509.CertificateSerialNumber;
-import sun.security.x509.CertificateSubjectName;
-import sun.security.x509.CertificateValidity;
-import sun.security.x509.CertificateVersion;
-import sun.security.x509.CertificateX509Key;
-import sun.security.x509.X500Name;
-import sun.security.x509.X509CertImpl;
-import sun.security.x509.X509CertInfo;
+import org.bouncycastle.x509.X509V1CertificateGenerator;
public class KeyStoreTestUtil {
- public static String getClasspathDir(Class klass) throws Exception {
+ public static String getClasspathDir(Class<?> klass) throws Exception {
String file = klass.getName();
file = file.replace('.', '/') + ".class";
URL url = Thread.currentThread().getContextClassLoader().getResource(file);
@@ -68,48 +62,31 @@ public class KeyStoreTestUtil {
/**
* Create a self-signed X.509 Certificate.
- * From http://bfo.com/blog/2011/03/08/odds_and_ends_creating_a_new_x_509_certificate.html.
*
* @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB"
* @param pair the KeyPair
* @param days how many days from now the Certificate is valid for
* @param algorithm the signing algorithm, eg "SHA1withRSA"
* @return the self-signed certificate
- * @throws IOException thrown if an IO error ocurred.
- * @throws GeneralSecurityException thrown if an Security error ocurred.
*/
- public static X509Certificate generateCertificate(String dn, KeyPair pair,
- int days, String algorithm)
- throws GeneralSecurityException, IOException {
- PrivateKey privkey = pair.getPrivate();
- X509CertInfo info = new X509CertInfo();
+ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm)
+ throws CertificateEncodingException, InvalidKeyException, IllegalStateException,
+ NoSuchProviderException, NoSuchAlgorithmException, SignatureException {
Date from = new Date();
Date to = new Date(from.getTime() + days * 86400000l);
- CertificateValidity interval = new CertificateValidity(from, to);
BigInteger sn = new BigInteger(64, new SecureRandom());
- X500Name owner = new X500Name(dn);
-
- info.set(X509CertInfo.VALIDITY, interval);
- info.set(X509CertInfo.SERIAL_NUMBER, new CertificateSerialNumber(sn));
- info.set(X509CertInfo.SUBJECT, new CertificateSubjectName(owner));
- info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner));
- info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic()));
- info
- .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
- AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid);
- info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo));
-
- // Sign the cert to identify the algorithm that's used.
- X509CertImpl cert = new X509CertImpl(info);
- cert.sign(privkey, algorithm);
-
- // Update the algorith, and resign.
- algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG);
- info
- .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
- algo);
- cert = new X509CertImpl(info);
- cert.sign(privkey, algorithm);
+ KeyPair keyPair = pair;
+ X509V1CertificateGenerator certGen = new X509V1CertificateGenerator();
+ X500Principal dnName = new X500Principal(dn);
+
+ certGen.setSerialNumber(sn);
+ certGen.setIssuerDN(dnName);
+ certGen.setNotBefore(from);
+ certGen.setNotAfter(to);
+ certGen.setSubjectDN(dnName);
+ certGen.setPublicKey(keyPair.getPublic());
+ certGen.setSignatureAlgorithm(algorithm);
+ X509Certificate cert = certGen.generate(pair.getPrivate());
return cert;
}
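----------------------------------------------------------------------
For reference, a minimal usage sketch of the reworked helper above. This is
illustrative only (the 2048-bit key size and 30-day validity are assumptions,
not values from the patch; the DN and algorithm come from the javadoc
example); it assumes bcprov-jdk16 is on the test classpath, as added in the
pom changes below.

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.cert.X509Certificate;
import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;

public class GenerateCertExample {
  public static void main(String[] args) throws Exception {
    // Generate an RSA key pair to wrap in a self-signed certificate.
    KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
    keyGen.initialize(2048); // illustrative key size
    KeyPair pair = keyGen.generateKeyPair();

    // Valid for 30 days, signed with SHA1withRSA (the algorithm named in
    // the javadoc example above).
    X509Certificate cert = KeyStoreTestUtil.generateCertificate(
        "CN=Test, L=London, C=GB", pair, 30, "SHA1withRSA");
    System.out.println(cert.getSubjectX500Principal());
  }
}
----------------------------------------------------------------------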
http://git-wip-us.apache.org/repos/asf/hbase/blob/2da1bf10/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ce49c04..d188137 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1174,6 +1174,7 @@
<joni.version>2.1.2</joni.version>
<jcodings.version>1.0.8</jcodings.version>
<spy.version>2.11.6</spy.version>
+ <bouncycastle.version>1.46</bouncycastle.version>
<!-- Plugin Dependencies -->
<maven.assembly.version>2.4</maven.assembly.version>
<maven.antrun.version>1.6</maven.antrun.version>
@@ -1695,6 +1696,12 @@
</exclusion>
</exclusions>
</dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <version>${bouncycastle.version}</version>
+ <scope>test</scope>
+ </dependency>
</dependencies>
</dependencyManagement>
<!-- Dependencies needed by subprojects -->
[17/50] [abbrv] hbase git commit: HBASE-13211 Procedure V2 - master Enable/Disable table (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 4713a0a..d83ee19 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -707,6 +707,242 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(enum_scope:DeleteColumnFamilyState)
}
+ /**
+ * Protobuf enum {@code EnableTableState}
+ */
+ public enum EnableTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>ENABLE_TABLE_PREPARE = 1;</code>
+ */
+ ENABLE_TABLE_PREPARE(0, 1),
+ /**
+ * <code>ENABLE_TABLE_PRE_OPERATION = 2;</code>
+ */
+ ENABLE_TABLE_PRE_OPERATION(1, 2),
+ /**
+ * <code>ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3;</code>
+ */
+ ENABLE_TABLE_SET_ENABLING_TABLE_STATE(2, 3),
+ /**
+ * <code>ENABLE_TABLE_MARK_REGIONS_ONLINE = 4;</code>
+ */
+ ENABLE_TABLE_MARK_REGIONS_ONLINE(3, 4),
+ /**
+ * <code>ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5;</code>
+ */
+ ENABLE_TABLE_SET_ENABLED_TABLE_STATE(4, 5),
+ /**
+ * <code>ENABLE_TABLE_POST_OPERATION = 6;</code>
+ */
+ ENABLE_TABLE_POST_OPERATION(5, 6),
+ ;
+
+ /**
+ * <code>ENABLE_TABLE_PREPARE = 1;</code>
+ */
+ public static final int ENABLE_TABLE_PREPARE_VALUE = 1;
+ /**
+ * <code>ENABLE_TABLE_PRE_OPERATION = 2;</code>
+ */
+ public static final int ENABLE_TABLE_PRE_OPERATION_VALUE = 2;
+ /**
+ * <code>ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3;</code>
+ */
+ public static final int ENABLE_TABLE_SET_ENABLING_TABLE_STATE_VALUE = 3;
+ /**
+ * <code>ENABLE_TABLE_MARK_REGIONS_ONLINE = 4;</code>
+ */
+ public static final int ENABLE_TABLE_MARK_REGIONS_ONLINE_VALUE = 4;
+ /**
+ * <code>ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5;</code>
+ */
+ public static final int ENABLE_TABLE_SET_ENABLED_TABLE_STATE_VALUE = 5;
+ /**
+ * <code>ENABLE_TABLE_POST_OPERATION = 6;</code>
+ */
+ public static final int ENABLE_TABLE_POST_OPERATION_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static EnableTableState valueOf(int value) {
+ switch (value) {
+ case 1: return ENABLE_TABLE_PREPARE;
+ case 2: return ENABLE_TABLE_PRE_OPERATION;
+ case 3: return ENABLE_TABLE_SET_ENABLING_TABLE_STATE;
+ case 4: return ENABLE_TABLE_MARK_REGIONS_ONLINE;
+ case 5: return ENABLE_TABLE_SET_ENABLED_TABLE_STATE;
+ case 6: return ENABLE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<EnableTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<EnableTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<EnableTableState>() {
+ public EnableTableState findValueByNumber(int number) {
+ return EnableTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
+ }
+
+ private static final EnableTableState[] VALUES = values();
+
+ public static EnableTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private EnableTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:EnableTableState)
+ }
+
+ /**
+ * Protobuf enum {@code DisableTableState}
+ */
+ public enum DisableTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>DISABLE_TABLE_PREPARE = 1;</code>
+ */
+ DISABLE_TABLE_PREPARE(0, 1),
+ /**
+ * <code>DISABLE_TABLE_PRE_OPERATION = 2;</code>
+ */
+ DISABLE_TABLE_PRE_OPERATION(1, 2),
+ /**
+ * <code>DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3;</code>
+ */
+ DISABLE_TABLE_SET_DISABLING_TABLE_STATE(2, 3),
+ /**
+ * <code>DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4;</code>
+ */
+ DISABLE_TABLE_MARK_REGIONS_OFFLINE(3, 4),
+ /**
+ * <code>DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5;</code>
+ */
+ DISABLE_TABLE_SET_DISABLED_TABLE_STATE(4, 5),
+ /**
+ * <code>DISABLE_TABLE_POST_OPERATION = 6;</code>
+ */
+ DISABLE_TABLE_POST_OPERATION(5, 6),
+ ;
+
+ /**
+ * <code>DISABLE_TABLE_PREPARE = 1;</code>
+ */
+ public static final int DISABLE_TABLE_PREPARE_VALUE = 1;
+ /**
+ * <code>DISABLE_TABLE_PRE_OPERATION = 2;</code>
+ */
+ public static final int DISABLE_TABLE_PRE_OPERATION_VALUE = 2;
+ /**
+ * <code>DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3;</code>
+ */
+ public static final int DISABLE_TABLE_SET_DISABLING_TABLE_STATE_VALUE = 3;
+ /**
+ * <code>DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4;</code>
+ */
+ public static final int DISABLE_TABLE_MARK_REGIONS_OFFLINE_VALUE = 4;
+ /**
+ * <code>DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5;</code>
+ */
+ public static final int DISABLE_TABLE_SET_DISABLED_TABLE_STATE_VALUE = 5;
+ /**
+ * <code>DISABLE_TABLE_POST_OPERATION = 6;</code>
+ */
+ public static final int DISABLE_TABLE_POST_OPERATION_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static DisableTableState valueOf(int value) {
+ switch (value) {
+ case 1: return DISABLE_TABLE_PREPARE;
+ case 2: return DISABLE_TABLE_PRE_OPERATION;
+ case 3: return DISABLE_TABLE_SET_DISABLING_TABLE_STATE;
+ case 4: return DISABLE_TABLE_MARK_REGIONS_OFFLINE;
+ case 5: return DISABLE_TABLE_SET_DISABLED_TABLE_STATE;
+ case 6: return DISABLE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<DisableTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<DisableTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<DisableTableState>() {
+ public DisableTableState findValueByNumber(int number) {
+ return DisableTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
+ }
+
+ private static final DisableTableState[] VALUES = values();
+
+ public static DisableTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private DisableTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:DisableTableState)
+ }
+
public interface CreateTableStateDataOrBuilder
extends com.google.protobuf.MessageOrBuilder {
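----------------------------------------------------------------------
A note on the generated state enums above: each constant pairs a descriptor
index (first constructor argument) with the proto field number (second
argument), so getNumber() returns the wire value rather than the Java
ordinal. A minimal sketch of the round-trip, assuming the regenerated
MasterProcedureProtos is on the classpath:

import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;

public class EnableTableStateExample {
  public static void main(String[] args) {
    // getNumber() yields the proto field number (1-based), not ordinal().
    EnableTableState s = EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE;
    System.out.println(s.getNumber());                // 4
    System.out.println(s.ordinal());                  // 3

    // valueOf(int) maps a wire number back to a constant, or null if unknown.
    System.out.println(EnableTableState.valueOf(4));  // ENABLE_TABLE_MARK_REGIONS_ONLINE
    System.out.println(EnableTableState.valueOf(99)); // null
  }
}
----------------------------------------------------------------------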
@@ -7620,120 +7856,1901 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(class_scope:DeleteColumnFamilyStateData)
}
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_CreateTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_CreateTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ModifyTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ModifyTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DeleteTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DeleteTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_AddColumnFamilyStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_AddColumnFamilyStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ModifyColumnFamilyStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ModifyColumnFamilyStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DeleteColumnFamilyStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DeleteColumnFamilyStateData_fieldAccessorTable;
+ public interface EnableTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required bool skip_table_state_check = 3;
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ boolean hasSkipTableStateCheck();
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ boolean getSkipTableStateCheck();
}
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" +
- "C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
- "_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" +
- "chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" +
- "\030\003 \003(\0132\013.RegionInfo\"\277\001\n\024ModifyTableState" +
- "Data\022#\n\tuser_info\030\001 \002(\0132\020.UserInformatio" +
- "n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
- "leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
- "\014.TableSchema\022&\n\036delete_column_family_in" +
- "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
- "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
- "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
- "o\030\003 \003(\0132\013.RegionInfo\"\300\001\n\030AddColumnFamily" +
- "StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" +
- "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220" +
- "\n\023columnfamily_schema\030\003 \002(\0132\023.ColumnFami" +
- "lySchema\022-\n\027unmodified_table_schema\030\004 \001(" +
- "\0132\014.TableSchema\"\303\001\n\033ModifyColumnFamilySt" +
- "ateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInforma" +
- "tion\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023",
- "columnfamily_schema\030\003 \002(\0132\023.ColumnFamily" +
- "Schema\022-\n\027unmodified_table_schema\030\004 \001(\0132" +
- "\014.TableSchema\"\254\001\n\033DeleteColumnFamilyStat" +
- "eData\022#\n\tuser_info\030\001 \002(\0132\020.UserInformati" +
- "on\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\031\n\021co" +
- "lumnfamily_name\030\003 \002(\014\022-\n\027unmodified_tabl" +
- "e_schema\030\004 \001(\0132\014.TableSchema*\330\001\n\020CreateT" +
- "ableState\022\036\n\032CREATE_TABLE_PRE_OPERATION\020" +
- "\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030C" +
- "REATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABL",
- "E_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDAT" +
- "E_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPER" +
- "ATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY_T" +
- "ABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERA" +
- "TION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_DESC" +
- "RIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_" +
- "COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOU" +
- "T\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006\022#\n\037" +
- "MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\337\001\n\020De" +
- "leteTableState\022\036\n\032DELETE_TABLE_PRE_OPERA",
- "TION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020" +
- "\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036D" +
- "ELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELET" +
- "E_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TAB" +
- "LE_POST_OPERATION\020\006*\331\001\n\024AddColumnFamilyS" +
- "tate\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037A" +
- "DD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_" +
- "COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022" +
- "$\n ADD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n" +
- "$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*",
- "\353\001\n\027ModifyColumnFamilyState\022 \n\034MODIFY_CO" +
- "LUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_F" +
- "AMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_F" +
- "AMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIF" +
- "Y_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODI" +
- "FY_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002" +
- "\n\027DeleteColumnFamilyState\022 \n\034DELETE_COLU" +
- "MN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAM" +
- "ILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAM" +
- "ILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_",
- "COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELE" +
- "TE_COLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DEL" +
- "ETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006BK" +
- "\n*org.apache.hadoop.hbase.protobuf.gener" +
- "atedB\025MasterProcedureProtosH\001\210\001\001\240\001\001"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_CreateTableStateData_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_CreateTableStateData_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ /**
+ * Protobuf type {@code EnableTableStateData}
+ */
+ public static final class EnableTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements EnableTableStateDataOrBuilder {
+ // Use EnableTableStateData.newBuilder() to construct.
+ private EnableTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private EnableTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final EnableTableStateData defaultInstance;
+ public static EnableTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public EnableTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private EnableTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<EnableTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<EnableTableStateData>() {
+ public EnableTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new EnableTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<EnableTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ public static final int SKIP_TABLE_STATE_CHECK_FIELD_NUMBER = 3;
+ private boolean skipTableStateCheck_;
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ skipTableStateCheck_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(3, skipTableStateCheck_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(3, skipTableStateCheck_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasSkipTableStateCheck() == other.hasSkipTableStateCheck());
+ if (hasSkipTableStateCheck()) {
+ result = result && (getSkipTableStateCheck()
+ == other.getSkipTableStateCheck());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasSkipTableStateCheck()) {
+ hash = (37 * hash) + SKIP_TABLE_STATE_CHECK_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getSkipTableStateCheck());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code EnableTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ skipTableStateCheck_ = false;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.skipTableStateCheck_ = skipTableStateCheck_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasSkipTableStateCheck()) {
+ setSkipTableStateCheck(other.getSkipTableStateCheck());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ private boolean skipTableStateCheck_ ;
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public Builder setSkipTableStateCheck(boolean value) {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public Builder clearSkipTableStateCheck() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ skipTableStateCheck_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:EnableTableStateData)
+ }
+
+ static {
+ defaultInstance = new EnableTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:EnableTableStateData)
+ }
+
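----------------------------------------------------------------------
The message classes above are assembled through their nested Builders; all
three fields are required, so build() throws if any is unset. A minimal,
hypothetical construction sketch (the user and table names are made up; the
UserInformation and TableName setters are assumed to follow their
definitions in RPC.proto and HBase.proto, which are not part of this diff):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;

public class EnableTableStateDataExample {
  public static void main(String[] args) throws Exception {
    EnableTableStateData data = EnableTableStateData.newBuilder()
        .setUserInfo(UserInformation.newBuilder()
            .setEffectiveUser("hbase")                   // illustrative user
            .build())
        .setTableName(TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1")) // illustrative table
            .build())
        .setSkipTableStateCheck(false)
        .build();

    // Round-trip through the generated parser shown above.
    byte[] bytes = data.toByteArray();
    EnableTableStateData parsed = EnableTableStateData.parseFrom(bytes);
    System.out.println(parsed.getTableName().getQualifier().toStringUtf8()); // t1
  }
}
----------------------------------------------------------------------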
+ public interface DisableTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required bool skip_table_state_check = 3;
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ boolean hasSkipTableStateCheck();
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ boolean getSkipTableStateCheck();
+ }
+ /**
+ * Protobuf type {@code DisableTableStateData}
+ */
+ public static final class DisableTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements DisableTableStateDataOrBuilder {
+ // Use DisableTableStateData.newBuilder() to construct.
+ private DisableTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DisableTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DisableTableStateData defaultInstance;
+ public static DisableTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DisableTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DisableTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DisableTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<DisableTableStateData>() {
+ public DisableTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DisableTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DisableTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ public static final int SKIP_TABLE_STATE_CHECK_FIELD_NUMBER = 3;
+ private boolean skipTableStateCheck_;
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ skipTableStateCheck_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(3, skipTableStateCheck_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(3, skipTableStateCheck_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasSkipTableStateCheck() == other.hasSkipTableStateCheck());
+ if (hasSkipTableStateCheck()) {
+ result = result && (getSkipTableStateCheck()
+ == other.getSkipTableStateCheck());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasSkipTableStateCheck()) {
+ hash = (37 * hash) + SKIP_TABLE_STATE_CHECK_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getSkipTableStateCheck());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code DisableTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ skipTableStateCheck_ = false;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.skipTableStateCheck_ = skipTableStateCheck_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasSkipTableStateCheck()) {
+ setSkipTableStateCheck(other.getSkipTableStateCheck());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ private boolean skipTableStateCheck_ ;
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public Builder setSkipTableStateCheck(boolean value) {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool skip_table_state_check = 3;</code>
+ */
+ public Builder clearSkipTableStateCheck() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ skipTableStateCheck_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:DisableTableStateData)
+ }
+
+ static {
+ defaultInstance = new DisableTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DisableTableStateData)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CreateTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CreateTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ModifyTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ModifyTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_AddColumnFamilyStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_AddColumnFamilyStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ModifyColumnFamilyStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ModifyColumnFamilyStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteColumnFamilyStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteColumnFamilyStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_EnableTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_EnableTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DisableTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DisableTableStateData_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" +
+ "C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
+ "_info\030\001
<TRUNCATED>
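For orientation, the generated DisableTableStateData above is driven entirely through its nested Builder: set the three required fields, call build(), then serialize. A minimal sketch, not part of the commit; the setEffectiveUser field and ProtobufUtil.toProtoTableName are assumptions drawn from RPC.proto and the HBase protobuf utilities, and the classes live under org.apache.hadoop.hbase.protobuf.generated:

    // Sketch: building and round-tripping the generated message. All three
    // required fields must be set, or build() fails isInitialized().
    MasterProcedureProtos.DisableTableStateData data =
        MasterProcedureProtos.DisableTableStateData.newBuilder()
            .setUserInfo(RPCProtos.UserInformation.newBuilder()
                .setEffectiveUser("hbase")    // assumed field from RPC.proto
                .build())
            .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))
            .setSkipTableStateCheck(false)
            .build();
    byte[] bytes = data.toByteArray();
    MasterProcedureProtos.DisableTableStateData parsed =
        MasterProcedureProtos.DisableTableStateData.parseFrom(bytes);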
[47/50] [abbrv] hbase git commit: Merge branch 'apache/master' (4/16/15) into hbase-11339
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
index 5bf5a30,0000000..b4c3aca
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
@@@ -1,305 -1,0 +1,305 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Test creating, using, and deleting snapshots from the client
+ * <p>
+ * This is an end-to-end test for the snapshot utility
+ */
+@Category({LargeTests.class, ClientTests.class})
+public class TestMobSnapshotFromClient {
+ private static final Log LOG = LogFactory.getLog(TestMobSnapshotFromClient.class);
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final int NUM_RS = 2;
+ private static final String STRING_TABLE_NAME = "test";
+ protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
+ protected static final TableName TABLE_NAME =
+ TableName.valueOf(STRING_TABLE_NAME);
+
+ /**
+ * Setup the config for the cluster
+ * @throws Exception on failure
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(NUM_RS);
+ }
+
+ private static void setupConf(Configuration conf) {
+ // disable the ui
+ conf.setInt("hbase.regionserver.info.port", -1);
+ // change the flush size to a small amount, regulating number of store files
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // so make sure we get a compaction when doing a load, but keep around some
+ // files in the store
+ conf.setInt("hbase.hstore.compaction.min", 10);
+ conf.setInt("hbase.hstore.compactionThreshold", 10);
+ // block writes if we get to 12 store files
+ conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+ // Enable snapshot
+ conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+ ConstantSizeRegionSplitPolicy.class.getName());
+ conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), TEST_FAM);
+ }
+
+ protected int getNumReplicas() {
+ return 1;
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ UTIL.deleteTable(TABLE_NAME);
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ /**
+ * Test that snapshotting hbase:meta and -ROOT- is not allowed
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testMetaTablesSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ byte[] snapshotName = Bytes.toBytes("metaSnapshot");
+
+ try {
+ admin.snapshot(snapshotName, TableName.META_TABLE_NAME);
+ fail("taking a snapshot of hbase:meta should not be allowed");
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ }
+
+ /**
+ * Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose names match the given regex
+ *
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testSnapshotDeletionWithRegex() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ UTIL.loadTable(table, TEST_FAM);
+ table.close();
+
+ byte[] snapshot1 = Bytes.toBytes("TableSnapshot1");
+ admin.snapshot(snapshot1, TABLE_NAME);
+ LOG.debug("Snapshot1 completed.");
+
+ byte[] snapshot2 = Bytes.toBytes("TableSnapshot2");
+ admin.snapshot(snapshot2, TABLE_NAME);
+ LOG.debug("Snapshot2 completed.");
+
+ String snapshot3 = "3rdTableSnapshot";
+ admin.snapshot(Bytes.toBytes(snapshot3), TABLE_NAME);
+ LOG.debug(snapshot3 + " completed.");
+
+ // delete the first two snapshots
+ admin.deleteSnapshots("TableSnapshot.*");
+ List<SnapshotDescription> snapshots = admin.listSnapshots();
+ assertEquals(1, snapshots.size());
+ assertEquals(snapshots.get(0).getName(), snapshot3);
+
+ admin.deleteSnapshot(snapshot3);
+ admin.close();
+ }
+
+ /**
+ * Test snapshotting a table that is offline
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testOfflineTableSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ UTIL.loadTable(table, TEST_FAM, false);
+
+ LOG.debug("FS state before disable:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ // XXX if this is flakey, might want to consider using the async version and looping as
+ // disableTable can succeed and still timeout.
+ admin.disableTable(TABLE_NAME);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the disabled table
+ final String SNAPSHOT_NAME = "offlineTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
+
+ SnapshotDescription desc = SnapshotDescription.newBuilder()
+ .setType(SnapshotDescription.Type.DISABLED)
+ .setTable(STRING_TABLE_NAME)
+ .setName(SNAPSHOT_NAME)
+ .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
+ .build();
+ admin.snapshot(desc);
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure its a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+
+ admin.deleteSnapshot(snapshot);
+ snapshots = admin.listSnapshots();
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ }
+
+ @Test (timeout=300000)
+ public void testSnapshotFailsOnNonExistantTable() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ String tableName = "_not_a_table";
+
+ // make sure the table doesn't exist
+ boolean fail = false;
+ do {
+ try {
+ admin.getTableDescriptor(TableName.valueOf(tableName));
+ fail = true;
+ LOG.error("Table:" + tableName + " already exists, checking a new name");
+ tableName = tableName+"!";
+ } catch (TableNotFoundException e) {
+ fail = false;
+ }
+ } while (fail);
+
+ // snapshot the non-existent table
+ try {
+ admin.snapshot("fail", TableName.valueOf(tableName));
+ fail("Snapshot succeeded even though there is no table.");
+ } catch (SnapshotCreationException e) {
+ LOG.info("Correctly failed to snapshot a non-existent table: " + e.getMessage());
+ }
+ }
+
+ @Test (timeout=300000)
+ public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
+ // test with an empty table with one region
+
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ LOG.debug("FS state before disable:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ admin.disableTable(TABLE_NAME);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the disabled table
+ byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegions");
+ admin.snapshot(snapshot, TABLE_NAME);
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure its a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
+ List<byte[]> nonEmptyCfs = Lists.newArrayList();
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs,
+ rootDir, admin, fs);
+
+ admin.deleteSnapshot(snapshot);
+ snapshots = admin.listSnapshots();
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ }
+}
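A side note on the HTable-to-Table migration in the hunks above: the inline ConnectionFactory.createConnection(conf).getTable(name) form leaks the Connection it creates, since closing the Table does not close the Connection. A tidier sketch using the same API as the diff (illustrative only, not part of the commit):

    // Sketch: hold the Connection and close both resources deterministically.
    try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration());
         Table table = conn.getTable(TABLE_NAME)) {
      UTIL.loadTable(table, TEST_FAM);    // same helper the test calls
    }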
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
index 5e28cd9,0000000..bc3354b
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
@@@ -1,86 -1,0 +1,87 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import java.io.IOException;
+import java.util.Random;
+
++import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+
+public class MobTestUtil {
+ protected static final char FIRST_CHAR = 'a';
+ protected static final char LAST_CHAR = 'z';
+
+ protected static String generateRandomString(int demoLength) {
+ String base = "abcdefghijklmnopqrstuvwxyz";
+ Random random = new Random();
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < demoLength; i++) {
+ int number = random.nextInt(base.length());
+ sb.append(base.charAt(number));
+ }
+ return sb.toString();
+ }
+ protected static void writeStoreFile(final StoreFile.Writer writer, String caseName)
+ throws IOException {
+ writeStoreFile(writer, Bytes.toBytes(caseName), Bytes.toBytes(caseName));
+ }
+
+ /*
+ * Writes a series of KeyValues (two-byte rows "aa" through "zz") to the
+ * passed writer and then closes it.
+ *
+ * @param writer the store file writer to populate and close
+ * @throws IOException if writing or closing fails
+ */
+ private static void writeStoreFile(final StoreFile.Writer writer, byte[] fam,
+ byte[] qualifier) throws IOException {
+ long now = System.currentTimeMillis();
+ try {
+ for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
+ for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
+ byte[] b = new byte[] { (byte) d, (byte) e };
+ writer.append(new KeyValue(b, fam, qualifier, now, b));
+ }
+ }
+ } finally {
+ writer.close();
+ }
+ }
+
+ /**
- * Compare two KeyValue only for their row family qualifier value
++ * Compare two Cells only for their row family qualifier value
+ */
- public static void assertKeyValuesEquals(KeyValue firstKeyValue,
- KeyValue secondKeyValue) {
++ public static void assertCellEquals(Cell firstKeyValue,
++ Cell secondKeyValue) {
+ Assert.assertEquals(Bytes.toString(CellUtil.cloneRow(firstKeyValue)),
+ Bytes.toString(CellUtil.cloneRow(secondKeyValue)));
+ Assert.assertEquals(Bytes.toString(CellUtil.cloneFamily(firstKeyValue)),
+ Bytes.toString(CellUtil.cloneFamily(secondKeyValue)));
+ Assert.assertEquals(Bytes.toString(CellUtil.cloneQualifier(firstKeyValue)),
+ Bytes.toString(CellUtil.cloneQualifier(secondKeyValue)));
+ Assert.assertEquals(Bytes.toString(CellUtil.cloneValue(firstKeyValue)),
+ Bytes.toString(CellUtil.cloneValue(secondKeyValue)));
+ }
+}
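Since the renamed helper now accepts any Cell, KeyValue-based callers keep working without KeyValueUtil conversions (KeyValue implements Cell). A hypothetical usage sketch; the literals are invented:

    // Sketch: two cells compare equal when row, family, qualifier and value match.
    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    MobTestUtil.assertCellEquals(kv, kv);    // trivially passes against itself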
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
index e0e9541,0000000..b38e7cb
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java
@@@ -1,154 -1,0 +1,154 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
- import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestCachedMobFile extends TestCase {
+ static final Log LOG = LogFactory.getLog(TestCachedMobFile.class);
+ private Configuration conf = HBaseConfiguration.create();
+ private CacheConfig cacheConf = new CacheConfig(conf);
- private final String TABLE = "tableName";
- private final String FAMILY = "familyName";
- private final String FAMILY1 = "familyName1";
- private final String FAMILY2 = "familyName2";
- private final long EXPECTED_REFERENCE_ZERO = 0;
- private final long EXPECTED_REFERENCE_ONE = 1;
- private final long EXPECTED_REFERENCE_TWO = 2;
++ private static final String TABLE = "tableName";
++ private static final String FAMILY = "familyName";
++ private static final String FAMILY1 = "familyName1";
++ private static final String FAMILY2 = "familyName2";
++ private static final long EXPECTED_REFERENCE_ZERO = 0;
++ private static final long EXPECTED_REFERENCE_ONE = 1;
++ private static final long EXPECTED_REFERENCE_TWO = 2;
+
+ @Test
+ public void testOpenClose() throws Exception {
+ String caseName = getName();
+ FileSystem fs = FileSystem.get(conf);
+ Path testDir = FSUtils.getRootDir(conf);
+ Path outputDir = new Path(new Path(testDir, TABLE),
+ FAMILY);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withOutputDir(outputDir).withFileContext(meta).build();
+ MobTestUtil.writeStoreFile(writer, caseName);
+ CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
+ Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
+ cachedMobFile.open();
+ Assert.assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount());
+ cachedMobFile.open();
+ Assert.assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile.getReferenceCount());
+ cachedMobFile.close();
+ Assert.assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount());
+ cachedMobFile.close();
+ Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
+ }
+
+ @Test
+ public void testCompare() throws Exception {
+ String caseName = getName();
+ FileSystem fs = FileSystem.get(conf);
+ Path testDir = FSUtils.getRootDir(conf);
+ Path outputDir1 = new Path(new Path(testDir, TABLE),
+ FAMILY1);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+ StoreFile.Writer writer1 = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withOutputDir(outputDir1).withFileContext(meta).build();
+ MobTestUtil.writeStoreFile(writer1, caseName);
+ CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf);
+ Path outputDir2 = new Path(new Path(testDir, TABLE),
+ FAMILY2);
+ StoreFile.Writer writer2 = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withOutputDir(outputDir2)
+ .withFileContext(meta)
+ .build();
+ MobTestUtil.writeStoreFile(writer2, caseName);
+ CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf);
+ cachedMobFile1.access(1);
+ cachedMobFile2.access(2);
+ Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile2), 1);
+ Assert.assertEquals(cachedMobFile2.compareTo(cachedMobFile1), -1);
+ Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile1), 0);
+ }
+
+ @Test
+ public void testReadKeyValue() throws Exception {
+ FileSystem fs = FileSystem.get(conf);
+ Path testDir = FSUtils.getRootDir(conf);
+ Path outputDir = new Path(new Path(testDir, TABLE), "familyname");
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withOutputDir(outputDir).withFileContext(meta).build();
+ String caseName = getName();
+ MobTestUtil.writeStoreFile(writer, caseName);
+ CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
+ byte[] family = Bytes.toBytes(caseName);
+ byte[] qualify = Bytes.toBytes(caseName);
+ // Test the start key
+ byte[] startKey = Bytes.toBytes("aa"); // The start key bytes
+ KeyValue expectedKey =
+ new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
+ KeyValue seekKey = expectedKey.createKeyOnly(false);
- KeyValue kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ Cell cell = cachedMobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the end key
+ byte[] endKey = Bytes.toBytes("zz"); // The end key bytes
+ expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
+ seekKey = expectedKey.createKeyOnly(false);
- kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ cell = cachedMobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the random key
+ byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
+ expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
+ seekKey = expectedKey.createKeyOnly(false);
- kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ cell = cachedMobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the key which is less than the start key
+ byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
+ expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
+ seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
- kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ cell = cachedMobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the key which is more than the end key
+ byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
+ seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
- kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false));
- Assert.assertNull(kv);
++ cell = cachedMobFile.readCell(seekKey, false);
++ Assert.assertNull(cell);
+ }
+}
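The three readCell assertions above encode its seek semantics: a search key at or below the first stored row resolves to the first cell, an in-range key resolves to its own row, and a key past the last stored row ("z{" sorts after "zz") yields null. A compressed sketch reusing the objects already defined in the test:

    // Sketch only: readCell seeks to the first cell at-or-after the search key.
    Cell hit = cachedMobFile.readCell(
        new KeyValue(Bytes.toBytes("a1"), family, qualify, Long.MAX_VALUE,
            Type.Put, Bytes.toBytes("a1")), false);   // "a1" < "aa": returns row "aa"
    Cell miss = cachedMobFile.readCell(
        new KeyValue(Bytes.toBytes("z{"), family, qualify, Long.MAX_VALUE,
            Type.Put, Bytes.toBytes("z{")), false);   // "z{" > "zz": returns null
    Assert.assertNotNull(hit);
    Assert.assertNull(miss);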
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
index 5e3a695,0000000..b91d4d1
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java
@@@ -1,193 -1,0 +1,190 @@@
+/**
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hbase.mob;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
- import org.apache.hadoop.hbase.client.HBaseAdmin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.Admin;
++import org.apache.hadoop.hbase.client.Connection;
++import org.apache.hadoop.hbase.client.ConnectionFactory;
++import org.apache.hadoop.hbase.client.HBaseAdmin;
++import org.apache.hadoop.hbase.client.Put;
++import org.apache.hadoop.hbase.client.Result;
++import org.apache.hadoop.hbase.client.ResultScanner;
++import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestDefaultMobStoreFlusher {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static byte [] row1 = Bytes.toBytes("row1");
+ private final static byte [] row2 = Bytes.toBytes("row2");
+ private final static byte [] family = Bytes.toBytes("family");
+ private final static byte [] qf1 = Bytes.toBytes("qf1");
+ private final static byte [] qf2 = Bytes.toBytes("qf2");
+ private final static byte [] value1 = Bytes.toBytes("value1");
+ private final static byte [] value2 = Bytes.toBytes("value2");
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testFlushNonMobFile() throws InterruptedException {
+ String TN = "testFlushNonMobFile";
- HTable table = null;
++ TableName tn = TableName.valueOf(TN);
++ Table table = null;
+ HBaseAdmin admin = null;
+
+ try {
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TN));
++ HTableDescriptor desc = new HTableDescriptor(tn);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
- admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
++ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), TN);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getTable(TableName.valueOf(TN));
+
+ //Put data
+ Put put0 = new Put(row1);
- put0.add(family, qf1, 1, value1);
++ put0.addColumn(family, qf1, 1, value1);
+ table.put(put0);
+
+ //Put more data
+ Put put1 = new Put(row2);
- put1.add(family, qf2, 1, value2);
++ put1.addColumn(family, qf2, 1, value2);
+ table.put(put1);
+
+ //Flush
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(tn);
+
+ Scan scan = new Scan();
+ scan.addColumn(family, qf1);
+ scan.setMaxVersions(4);
+ ResultScanner scanner = table.getScanner(scan);
+
+ //Compare
+ Result result = scanner.next();
+ int size = 0;
+ while (result != null) {
+ size++;
+ List<Cell> cells = result.getColumnCells(family, qf1);
+ // Verify the cell size
+ Assert.assertEquals(1, cells.size());
+ // Verify the value
+ Assert.assertEquals(Bytes.toString(value1),
+ Bytes.toString(CellUtil.cloneValue(cells.get(0))));
+ result = scanner.next();
+ }
+ scanner.close();
+ Assert.assertEquals(1, size);
+ admin.close();
+ } catch (MasterNotRunningException e1) {
+ e1.printStackTrace();
+ } catch (ZooKeeperConnectionException e2) {
+ e2.printStackTrace();
+ } catch (IOException e3) {
+ e3.printStackTrace();
+ }
+ }
+
+ @Test
+ public void testFlushMobFile() throws InterruptedException {
+ String TN = "testFlushMobFile";
- HTable table = null;
- HBaseAdmin admin = null;
++ TableName tn = TableName.valueOf(TN);
++ Table table = null;
++ Admin admin = null;
+
+ try {
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TN));
++ HTableDescriptor desc = new HTableDescriptor(tn);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
- admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
++ Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
++ admin = c.getAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), TN);
++ table = c.getTable(TableName.valueOf(TN));
+
+ //put data
+ Put put0 = new Put(row1);
- put0.add(family, qf1, 1, value1);
++ put0.addColumn(family, qf1, 1, value1);
+ table.put(put0);
+
+ //put more data
+ Put put1 = new Put(row2);
- put1.add(family, qf2, 1, value2);
++ put1.addColumn(family, qf2, 1, value2);
+ table.put(put1);
+
+ //flush
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(tn);
+
+ //Scan
+ Scan scan = new Scan();
+ scan.addColumn(family, qf1);
+ scan.setMaxVersions(4);
+ ResultScanner scanner = table.getScanner(scan);
+
+ //Compare
+ Result result = scanner.next();
+ int size = 0;
+ while (result != null) {
+ size++;
+ List<Cell> cells = result.getColumnCells(family, qf1);
+ // Verify the cell size
+ Assert.assertEquals(1, cells.size());
+ // Verify the value
+ Assert.assertEquals(Bytes.toString(value1),
+ Bytes.toString(CellUtil.cloneValue(cells.get(0))));
+ result = scanner.next();
+ }
+ scanner.close();
+ Assert.assertEquals(1, size);
+ admin.close();
+ } catch (MasterNotRunningException e1) {
+ e1.printStackTrace();
+ } catch (ZooKeeperConnectionException e2) {
+ e2.printStackTrace();
+ } catch (IOException e3) {
+ e3.printStackTrace();
+ }
+ }
+}
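For readers following the recurring change in these test diffs: the commit moves from the deprecated HTable/flushCommits client to the Connection-based API introduced in HBase 1.0. A minimal before/after sketch (the table and cell names are illustrative, not from the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

static void putAndFlush(Configuration conf) throws Exception {
  // Old 0.98-style usage removed by this commit:
  //   HTable table = new HTable(conf, "someTable");
  //   table.put(put);
  //   table.flushCommits(); // flushed the client-side write buffer
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Table table = conn.getTable(TableName.valueOf("someTable"));
       Admin admin = conn.getAdmin()) {
    Put put = new Put(Bytes.toBytes("row"));
    put.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qf"), Bytes.toBytes("value"));
    table.put(put);                              // writes go straight to the server
    admin.flush(TableName.valueOf("someTable")); // server-side memstore flush
  }
}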
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
index f16fa20,0000000..dfaeca6
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
@@@ -1,179 -1,0 +1,175 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
- import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
- import org.apache.hadoop.hbase.mob.MobUtils;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestExpiredMobFileCleaner {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static TableName tableName = TableName.valueOf("TestExpiredMobFileCleaner");
+ private final static String family = "family";
+ private final static byte[] row1 = Bytes.toBytes("row1");
+ private final static byte[] row2 = Bytes.toBytes("row2");
+ private final static byte[] qf = Bytes.toBytes("qf");
+
- private static HTable table;
++ private static BufferedMutator table;
+ private static Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+
+ TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ admin.disableTable(tableName);
+ admin.deleteTable(tableName);
+ admin.close();
+ TEST_UTIL.shutdownMiniCluster();
+ TEST_UTIL.getTestFileSystem().delete(TEST_UTIL.getDataTestDir(), true);
+ }
+
+ private void init() throws Exception {
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
+ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), tableName);
- table.setAutoFlush(false, false);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getBufferedMutator(tableName);
+ }
+
+ private void modifyColumnExpiryDays(int expireDays) throws Exception {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ // change the TTL to the given number of days so that some rows expire
+ int timeToLive = expireDays * secondsOfDay();
+ hcd.setTimeToLive(timeToLive);
+
+ admin.modifyColumn(tableName, hcd);
+ }
+
- private void putKVAndFlush(HTable table, byte[] row, byte[] value, long ts)
++ private void putKVAndFlush(BufferedMutator table, byte[] row, byte[] value, long ts)
+ throws Exception {
+
+ Put put = new Put(row, ts);
- put.add(Bytes.toBytes(family), qf, value);
- table.put(put);
++ put.addColumn(Bytes.toBytes(family), qf, value);
++ table.mutate(put);
+
- table.flushCommits();
++ table.flush();
+ admin.flush(tableName);
+ }
+
+ /**
+ * Creates a 3 day old hfile and a 1 day old hfile, then sets the expiry to 2 days.
+ * Verifies that the 3 day old hfile is removed but the 1 day old one is still present
+ * after the expiry-based cleaner is run.
+ */
+ @Test
+ public void testCleaner() throws Exception {
+ init();
+
+ Path mobDirPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
+
+ byte[] dummyData = makeDummyData(600);
+ long ts = System.currentTimeMillis() - 3 * secondsOfDay() * 1000; // 3 days before
+ putKVAndFlush(table, row1, dummyData, ts);
+ FileStatus[] firstFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
+ //the first mob file
+ assertEquals("Before cleanup without delay 1", 1, firstFiles.length);
+ String firstFile = firstFiles[0].getPath().getName();
+
+ ts = System.currentTimeMillis() - 1 * secondsOfDay() * 1000; // 1 day before
+ putKVAndFlush(table, row2, dummyData, ts);
+ FileStatus[] secondFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
+ //now there are 2 mob files
+ assertEquals("Before cleanup without delay 2", 2, secondFiles.length);
+ String f1 = secondFiles[0].getPath().getName();
+ String f2 = secondFiles[1].getPath().getName();
+ String secondFile = f1.equals(firstFile) ? f2 : f1;
+
+ modifyColumnExpiryDays(2); // ttl = 2, make the first row expired
+
+ //run the cleaner
+ String[] args = new String[2];
+ args[0] = tableName.getNameAsString();
+ args[1] = family;
+ ToolRunner.run(TEST_UTIL.getConfiguration(), new ExpiredMobFileCleaner(), args);
+
+ FileStatus[] filesAfterClean = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
+ String lastFile = filesAfterClean[0].getPath().getName();
+ //the first mob file is removed
+ assertEquals("After cleanup without delay 1", 1, filesAfterClean.length);
+ assertEquals("After cleanup without delay 2", secondFile, lastFile);
+ }
+
+ private Path getMobFamilyPath(Configuration conf, TableName tableName, String familyName) {
+ Path p = new Path(MobUtils.getMobRegionPath(conf, tableName), familyName);
+ return p;
+ }
+
+ private int secondsOfDay() {
+ return 24 * 3600;
+ }
+
+ private byte[] makeDummyData(int size) {
+ byte [] dummyData = new byte[size];
+ new Random().nextBytes(dummyData);
+ return dummyData;
+ }
+}
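The other client-side pattern replaced above is the autoflush-disabled HTable, whose 1.0+ equivalent is a BufferedMutator. A sketch of the putKVAndFlush shape under the new API (assuming an open Connection conn and the imports from the previous sketch):

static void bufferedPut(Connection conn, TableName tableName) throws Exception {
  // Old: table = new HTable(conf, tableName);
  //      table.setAutoFlush(false, false);
  //      table.put(put); ... table.flushCommits();
  try (BufferedMutator mutator = conn.getBufferedMutator(tableName)) {
    Put put = new Put(Bytes.toBytes("row"), System.currentTimeMillis());
    put.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qf"), Bytes.toBytes("value"));
    mutator.mutate(put); // queued in the client-side write buffer
    mutator.flush();     // push buffered mutations to the region servers
  }
}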
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
index 055eac3,0000000..15aa7d4
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
@@@ -1,141 -1,0 +1,135 @@@
+/**
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hbase.mob;
+
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.HBaseAdmin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestMobDataBlockEncoding {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static byte [] row1 = Bytes.toBytes("row1");
+ private final static byte [] family = Bytes.toBytes("family");
+ private final static byte [] qf1 = Bytes.toBytes("qualifier1");
+ private final static byte [] qf2 = Bytes.toBytes("qualifier2");
+ protected final byte[] qf3 = Bytes.toBytes("qualifier3");
- private static HTable table;
++ private static Table table;
+ private static HBaseAdmin admin;
+ private static HColumnDescriptor hcd;
+ private static HTableDescriptor desc;
+ private static Random random = new Random();
+ private static long defaultThreshold = 10;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ public void setUp(long threshold, String TN, DataBlockEncoding encoding)
+ throws Exception {
+ desc = new HTableDescriptor(TableName.valueOf(TN));
+ hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(threshold);
+ hcd.setMaxVersions(4);
+ hcd.setDataBlockEncoding(encoding);
+ desc.addFamily(hcd);
- admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
++ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), TN);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getTable(TableName.valueOf(TN));
+ }
+
+ /**
+ * Generate the mob value.
+ *
+ * @param size the size of the value
+ * @return the mob value generated
+ */
+ private static byte[] generateMobValue(int size) {
+ byte[] mobVal = new byte[size];
+ random.nextBytes(mobVal);
+ return mobVal;
+ }
+
+ @Test
+ public void testDataBlockEncoding() throws Exception {
+ for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
+ testDataBlockEncoding(encoding);
+ }
+ }
+
+ public void testDataBlockEncoding(DataBlockEncoding encoding) throws Exception {
+ String TN = "testDataBlockEncoding" + encoding;
+ setUp(defaultThreshold, TN, encoding);
+ long ts1 = System.currentTimeMillis();
+ long ts2 = ts1 + 1;
+ long ts3 = ts1 + 2;
+ byte[] value = generateMobValue((int) defaultThreshold + 1);
+
+ Put put1 = new Put(row1);
- put1.add(family, qf1, ts3, value);
- put1.add(family, qf2, ts2, value);
- put1.add(family, qf3, ts1, value);
++ put1.addColumn(family, qf1, ts3, value);
++ put1.addColumn(family, qf2, ts2, value);
++ put1.addColumn(family, qf3, ts1, value);
+ table.put(put1);
-
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(TableName.valueOf(TN));
+
+ Scan scan = new Scan();
+ scan.setMaxVersions(4);
+
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ Assert.assertEquals(Bytes.toString(value),
+ Bytes.toString(CellUtil.cloneValue(cell)));
+ count++;
+ }
+ }
+ results.close();
+ Assert.assertEquals(3, count);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
index 01050ae,0000000..d05da24
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
@@@ -1,124 -1,0 +1,124 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
- import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestMobFile extends TestCase {
+ static final Log LOG = LogFactory.getLog(TestMobFile.class);
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private Configuration conf = TEST_UTIL.getConfiguration();
+ private CacheConfig cacheConf = new CacheConfig(conf);
+ private final String TABLE = "tableName";
+ private final String FAMILY = "familyName";
+
+ @Test
+ public void testReadKeyValue() throws Exception {
+ FileSystem fs = FileSystem.get(conf);
+ Path testDir = FSUtils.getRootDir(conf);
+ Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withOutputDir(outputDir)
+ .withFileContext(meta)
+ .build();
+ String caseName = getName();
+ MobTestUtil.writeStoreFile(writer, caseName);
+
+ MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
+ conf, cacheConf, BloomType.NONE));
+ byte[] family = Bytes.toBytes(caseName);
+ byte[] qualify = Bytes.toBytes(caseName);
+
+ // Test the start key
+ byte[] startKey = Bytes.toBytes("aa"); // The start key bytes
+ KeyValue expectedKey =
+ new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
+ KeyValue seekKey = expectedKey.createKeyOnly(false);
- KeyValue kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ Cell cell = mobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the end key
+ byte[] endKey = Bytes.toBytes("zz"); // The end key bytes
+ expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
+ seekKey = expectedKey.createKeyOnly(false);
- kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ cell = mobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the random key
+ byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
+ expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
+ seekKey = expectedKey.createKeyOnly(false);
- kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ cell = mobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the key which is less than the start key
+ byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
+ expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
+ seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
- kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
- MobTestUtil.assertKeyValuesEquals(expectedKey, kv);
++ cell = mobFile.readCell(seekKey, false);
++ MobTestUtil.assertCellEquals(expectedKey, cell);
+
+ // Test the key which is more than the end key
+ byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
+ seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
- kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false));
- assertNull(kv);
++ cell = mobFile.readCell(seekKey, false);
++ assertNull(cell);
+ }
+
+ @Test
+ public void testGetScanner() throws Exception {
+ FileSystem fs = FileSystem.get(conf);
+ Path testDir = FSUtils.getRootDir(conf);
+ Path outputDir = new Path(new Path(testDir, TABLE), FAMILY);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build();
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withOutputDir(outputDir)
+ .withFileContext(meta)
+ .build();
+ MobTestUtil.writeStoreFile(writer, getName());
+
+ MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(),
+ conf, cacheConf, BloomType.NONE));
+ assertNotNull(mobFile.getScanner());
+ assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
index 1a809a1,0000000..95fa1b9
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java
@@@ -1,207 -1,0 +1,202 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import java.io.IOException;
+import java.util.Date;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.HMobStore;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
- import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestMobFileCache extends TestCase {
+ static final Log LOG = LogFactory.getLog(TestMobFileCache.class);
+ private HBaseTestingUtility UTIL;
+ private HRegion region;
+ private Configuration conf;
+ private MobCacheConfig mobCacheConf;
+ private MobFileCache mobFileCache;
+ private Date currentDate = new Date();
- private final String TEST_CACHE_SIZE = "2";
- private final int EXPECTED_CACHE_SIZE_ZERO = 0;
- private final int EXPECTED_CACHE_SIZE_ONE = 1;
- private final int EXPECTED_CACHE_SIZE_TWO = 2;
- private final int EXPECTED_CACHE_SIZE_THREE = 3;
- private final long EXPECTED_REFERENCE_ONE = 1;
- private final long EXPECTED_REFERENCE_TWO = 2;
-
- private final String TABLE = "tableName";
- private final String FAMILY1 = "family1";
- private final String FAMILY2 = "family2";
- private final String FAMILY3 = "family3";
-
- private final byte[] ROW = Bytes.toBytes("row");
- private final byte[] ROW2 = Bytes.toBytes("row2");
- private final byte[] VALUE = Bytes.toBytes("value");
- private final byte[] VALUE2 = Bytes.toBytes("value2");
- private final byte[] QF1 = Bytes.toBytes("qf1");
- private final byte[] QF2 = Bytes.toBytes("qf2");
- private final byte[] QF3 = Bytes.toBytes("qf3");
++ private static final String TEST_CACHE_SIZE = "2";
++ private static final int EXPECTED_CACHE_SIZE_ZERO = 0;
++ private static final int EXPECTED_CACHE_SIZE_ONE = 1;
++ private static final int EXPECTED_CACHE_SIZE_TWO = 2;
++ private static final int EXPECTED_CACHE_SIZE_THREE = 3;
++ private static final long EXPECTED_REFERENCE_ONE = 1;
++ private static final long EXPECTED_REFERENCE_TWO = 2;
++
++ private static final String TABLE = "tableName";
++ private static final String FAMILY1 = "family1";
++ private static final String FAMILY2 = "family2";
++ private static final String FAMILY3 = "family3";
++
++ private static final byte[] ROW = Bytes.toBytes("row");
++ private static final byte[] ROW2 = Bytes.toBytes("row2");
++ private static final byte[] VALUE = Bytes.toBytes("value");
++ private static final byte[] VALUE2 = Bytes.toBytes("value2");
++ private static final byte[] QF1 = Bytes.toBytes("qf1");
++ private static final byte[] QF2 = Bytes.toBytes("qf2");
++ private static final byte[] QF3 = Bytes.toBytes("qf3");
+
+ @Override
+ public void setUp() throws Exception {
+ UTIL = HBaseTestingUtility.createLocalHTU();
+ conf = UTIL.getConfiguration();
+ HTableDescriptor htd = UTIL.createTableDescriptor("testMobFileCache");
+ HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILY1);
+ hcd1.setMobEnabled(true);
+ hcd1.setMobThreshold(0);
+ HColumnDescriptor hcd2 = new HColumnDescriptor(FAMILY2);
+ hcd2.setMobEnabled(true);
+ hcd2.setMobThreshold(0);
+ HColumnDescriptor hcd3 = new HColumnDescriptor(FAMILY3);
+ hcd3.setMobEnabled(true);
+ hcd3.setMobThreshold(0);
+ htd.addFamily(hcd1);
+ htd.addFamily(hcd2);
+ htd.addFamily(hcd3);
+ region = UTIL.createLocalHRegion(htd, null, null);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ region.close();
+ region.getFilesystem().delete(UTIL.getDataTestDir(), true);
+ }
+
+ /**
+ * Create the mob store file.
- * @param family
+ */
+ private Path createMobStoreFile(String family) throws IOException {
+ return createMobStoreFile(HBaseConfiguration.create(), family);
+ }
+
+ /**
+ * Create the mob store file
- * @param conf
- * @param family
+ */
+ private Path createMobStoreFile(Configuration conf, String family) throws IOException {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMaxVersions(4);
+ hcd.setMobEnabled(true);
+ mobCacheConf = new MobCacheConfig(conf, hcd);
- return createMobStoreFile(conf, hcd);
++ return createMobStoreFile(hcd);
+ }
+
+ /**
+ * Create the mob store file
- * @param conf
- * @param hcd
+ */
- private Path createMobStoreFile(Configuration conf, HColumnDescriptor hcd)
++ private Path createMobStoreFile(HColumnDescriptor hcd)
+ throws IOException {
+ // Setting up a Store
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
++ TableName tn = TableName.valueOf(TABLE);
++ HTableDescriptor htd = new HTableDescriptor(tn);
+ htd.addFamily(hcd);
+ HMobStore mobStore = (HMobStore) region.getStore(hcd.getName());
+ KeyValue key1 = new KeyValue(ROW, hcd.getName(), QF1, 1, VALUE);
+ KeyValue key2 = new KeyValue(ROW, hcd.getName(), QF2, 1, VALUE);
+ KeyValue key3 = new KeyValue(ROW2, hcd.getName(), QF3, 1, VALUE2);
+ KeyValue[] keys = new KeyValue[] { key1, key2, key3 };
+ int maxKeyCount = keys.length;
- HRegionInfo regionInfo = new HRegionInfo();
++ HRegionInfo regionInfo = new HRegionInfo(tn);
+ StoreFile.Writer mobWriter = mobStore.createWriterInTmp(currentDate,
+ maxKeyCount, hcd.getCompactionCompression(), regionInfo.getStartKey());
+ Path mobFilePath = mobWriter.getPath();
+ String fileName = mobFilePath.getName();
+ mobWriter.append(key1);
+ mobWriter.append(key2);
+ mobWriter.append(key3);
+ mobWriter.close();
+ String targetPathName = MobUtils.formatDate(currentDate);
+ Path targetPath = new Path(mobStore.getPath(), targetPathName);
+ mobStore.commitFile(mobFilePath, targetPath);
+ return new Path(targetPath, fileName);
+ }
+
+ @Test
+ public void testMobFileCache() throws Exception {
+ FileSystem fs = FileSystem.get(conf);
+ conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY, TEST_CACHE_SIZE);
+ mobFileCache = new MobFileCache(conf);
+ Path file1Path = createMobStoreFile(FAMILY1);
+ Path file2Path = createMobStoreFile(FAMILY2);
+ Path file3Path = createMobStoreFile(FAMILY3);
+
+ // Before opening any file via the MobFileCache
+ assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize());
+ // Open one file by the MobFileCache
+ CachedMobFile cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(
+ fs, file1Path, mobCacheConf);
+ assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
+ assertNotNull(cachedMobFile1);
+ assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount());
+
+ // Eviction is also managed by a scheduled thread pool,
+ // whose check period defaults to 3600 seconds.
+ // This explicit evict should therefore acquire the lock most of the time.
+ mobFileCache.evict(); // Cache not full, evict it
+ assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
+ assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount());
+
+ mobFileCache.evictFile(file1Path.getName()); // Evict one file
+ assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize());
+ assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile1.getReferenceCount());
+
+ cachedMobFile1.close(); // Close the cached mob file
+
+ // Reopen three cached files
+ cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(
+ fs, file1Path, mobCacheConf);
+ assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
+ CachedMobFile cachedMobFile2 = (CachedMobFile) mobFileCache.openFile(
+ fs, file2Path, mobCacheConf);
+ assertEquals(EXPECTED_CACHE_SIZE_TWO, mobFileCache.getCacheSize());
+ CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile(
+ fs, file3Path, mobCacheConf);
+ // Before the evict
- // Evict the cache, should clost the first file 1
++ // Evict the cache, should close the first file 1
+ assertEquals(EXPECTED_CACHE_SIZE_THREE, mobFileCache.getCacheSize());
+ assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount());
+ assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile2.getReferenceCount());
+ assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile3.getReferenceCount());
+ mobFileCache.evict();
+ assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize());
+ assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile1.getReferenceCount());
+ assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile2.getReferenceCount());
+ assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile3.getReferenceCount());
+ }
+}
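On the eviction comments in testMobFileCache above: the background evictor runs on a schedule (3600 seconds by default), which is why the explicit evict() calls are expected to win the lock. If a test ever needed a faster evictor, the setup would look roughly like this; the evict-period key is an assumption (only MOB_FILE_CACHE_SIZE_KEY is exercised in this diff), so verify it against MobConstants before relying on it:

Configuration conf = HBaseConfiguration.create();
conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY, "2"); // as in the test above
conf.setLong("hbase.mob.cache.evict.period", 10L);   // assumed key: seconds between evictor runs
MobFileCache cache = new MobFileCache(conf);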
[24/50] [abbrv] hbase git commit: HBASE-13209 Procedure V2 - master
Add/Modify/Delete Column Family (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-protocol/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index 97d1af6..a07516d 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -89,3 +89,49 @@ message DeleteTableStateData {
required TableName table_name = 2;
repeated RegionInfo region_info = 3;
}
+
+enum AddColumnFamilyState {
+ ADD_COLUMN_FAMILY_PREPARE = 1;
+ ADD_COLUMN_FAMILY_PRE_OPERATION = 2;
+ ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;
+ ADD_COLUMN_FAMILY_POST_OPERATION = 4;
+ ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5;
+}
+
+message AddColumnFamilyStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required ColumnFamilySchema columnfamily_schema = 3;
+ optional TableSchema unmodified_table_schema = 4;
+}
+
+enum ModifyColumnFamilyState {
+ MODIFY_COLUMN_FAMILY_PREPARE = 1;
+ MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2;
+ MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;
+ MODIFY_COLUMN_FAMILY_POST_OPERATION = 4;
+ MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5;
+}
+
+message ModifyColumnFamilyStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required ColumnFamilySchema columnfamily_schema = 3;
+ optional TableSchema unmodified_table_schema = 4;
+}
+
+enum DeleteColumnFamilyState {
+ DELETE_COLUMN_FAMILY_PREPARE = 1;
+ DELETE_COLUMN_FAMILY_PRE_OPERATION = 2;
+ DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;
+ DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4;
+ DELETE_COLUMN_FAMILY_POST_OPERATION = 5;
+ DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6;
+}
+
+message DeleteColumnFamilyStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required bytes columnfamily_name = 3;
+ optional TableSchema unmodified_table_schema = 4;
+}
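The StateData messages above are what the procedures further down persist via length-delimited protobuf I/O (see serializeStateData/deserializeStateData in AddColumnFamilyProcedure below). A round-trip sketch, with the three builder inputs assumed to be pre-built protos:

ByteArrayOutputStream out = new ByteArrayOutputStream();
MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
    .setUserInfo(userInfo)               // assumed UserInformation proto
    .setTableName(protoTableName)        // assumed TableName proto
    .setColumnfamilySchema(cfSchema)     // assumed ColumnFamilySchema proto
    .build()
    .writeDelimitedTo(out);              // length-prefixed, so messages can be streamed back-to-back
MasterProcedureProtos.AddColumnFamilyStateData restored =
    MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray()));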
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ba739b2..2e33095 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -90,14 +90,14 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
-import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
-import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
-import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
@@ -1618,8 +1618,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
return;
}
}
- //TODO: we should process this (and some others) in an executor
- new TableAddFamilyHandler(tableName, columnDescriptor, this, this).prepare().process();
+ // Execute the operation synchronously - wait for the operation to complete before continuing.
+ long procId =
+ this.procedureExecutor.submitProcedure(new AddColumnFamilyProcedure(procedureExecutor
+ .getEnvironment(), tableName, columnDescriptor));
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
if (cpHost != null) {
cpHost.postAddColumn(tableName, columnDescriptor);
}
@@ -1637,8 +1640,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
- new TableModifyFamilyHandler(tableName, descriptor, this, this)
- .prepare().process();
+
+ // Execute the operation synchronously - wait for the operation to complete before continuing.
+ long procId =
+ this.procedureExecutor.submitProcedure(new ModifyColumnFamilyProcedure(procedureExecutor
+ .getEnvironment(), tableName, descriptor));
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+
if (cpHost != null) {
cpHost.postModifyColumn(tableName, descriptor);
}
@@ -1654,7 +1662,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName));
- new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process();
+
+ // Execute the operation synchronously - wait for the operation to complete before continuing.
+ long procId =
+ this.procedureExecutor.submitProcedure(new DeleteColumnFamilyProcedure(procedureExecutor
+ .getEnvironment(), tableName, columnName));
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+
if (cpHost != null) {
cpHost.postDeleteColumn(tableName, columnName);
}
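All three DDL call sites above now share the same two-step shape, which keeps the old synchronous semantics (the RPC returns only after the schema change completes). In outline, with XxxColumnFamilyProcedure standing in for any of the three new procedures:

long procId = procedureExecutor.submitProcedure(
    new XxxColumnFamilyProcedure(procedureExecutor.getEnvironment(), tableName, arg));
// Block the calling thread until the procedure finishes, surfacing its failure if any.
ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);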
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
index 7b5c5c5..3bbef0a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
@@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.master.handler;
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
/**
- * Handles adding a new family to an existing table.
+ * Handles deleting a column family from an existing table.
*/
@InterfaceAudience.Private
public class TableDeleteFamilyHandler extends TableEventHandler {
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
new file mode 100644
index 0000000..98a00c2
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java
@@ -0,0 +1,409 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * The procedure to add a column family to an existing table.
+ */
+@InterfaceAudience.Private
+public class AddColumnFamilyProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, AddColumnFamilyState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private TableName tableName;
+ private HTableDescriptor unmodifiedHTableDescriptor;
+ private HColumnDescriptor cfDescriptor;
+ private UserGroupInformation user;
+
+ private List<HRegionInfo> regionInfoList;
+ private Boolean traceEnabled;
+
+ public AddColumnFamilyProcedure() {
+ this.unmodifiedHTableDescriptor = null;
+ this.regionInfoList = null;
+ this.traceEnabled = null;
+ }
+
+ public AddColumnFamilyProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final HColumnDescriptor cfDescriptor) throws IOException {
+ this.tableName = tableName;
+ this.cfDescriptor = cfDescriptor;
+ this.user = env.getRequestUser().getUGI();
+ this.unmodifiedHTableDescriptor = null;
+ this.regionInfoList = null;
+ this.traceEnabled = null;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final AddColumnFamilyState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case ADD_COLUMN_FAMILY_PREPARE:
+ prepareAdd(env);
+ setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_PRE_OPERATION);
+ break;
+ case ADD_COLUMN_FAMILY_PRE_OPERATION:
+ preAdd(env, state);
+ setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR);
+ break;
+ case ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR:
+ updateTableDescriptor(env);
+ setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_POST_OPERATION);
+ break;
+ case ADD_COLUMN_FAMILY_POST_OPERATION:
+ postAdd(env, state);
+ setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS);
+ break;
+ case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ reOpenAllRegionsIfTableIsOnline(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ LOG.warn("Error trying to add the column family" + getColumnFamilyName() + " to the table "
+ + tableName + " (in state=" + state + ")", e);
+
+ setFailure("master-add-columnfamily", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final AddColumnFamilyState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ break; // Nothing to undo.
+ case ADD_COLUMN_FAMILY_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo?
+ break;
+ case ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR:
+ restoreTableDescriptor(env);
+ break;
+ case ADD_COLUMN_FAMILY_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo?
+ break;
+ case ADD_COLUMN_FAMILY_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step " + state + " for adding the column family"
+ + getColumnFamilyName() + " to the table " + tableName, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected AddColumnFamilyState getState(final int stateId) {
+ return AddColumnFamilyState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final AddColumnFamilyState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected AddColumnFamilyState getInitialState() {
+ return AddColumnFamilyState.ADD_COLUMN_FAMILY_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(AddColumnFamilyState state) {
+ if (aborted.get()) {
+ setAbortFailure("add-columnfamily", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_ADD_FAMILY.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.AddColumnFamilyStateData.Builder addCFMsg =
+ MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setColumnfamilySchema(cfDescriptor.convert());
+ if (unmodifiedHTableDescriptor != null) {
+ addCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert());
+ }
+
+ addCFMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.AddColumnFamilyStateData addCFMsg =
+ MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(addCFMsg.getTableName());
+ cfDescriptor = HColumnDescriptor.convert(addCFMsg.getColumnfamilySchema());
+ if (addCFMsg.hasUnmodifiedTableSchema()) {
+ unmodifiedHTableDescriptor = HTableDescriptor.convert(addCFMsg.getUnmodifiedTableSchema());
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(", columnfamily=");
+ if (cfDescriptor != null) {
+ sb.append(getColumnFamilyName());
+ } else {
+ sb.append("Unknown");
+ }
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Action before any real work of adding the column family.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareAdd(final MasterProcedureEnv env) throws IOException {
+ // Checks whether the table is allowed to be modified.
+ MasterDDLOperationHelper.checkTableModifiable(env, tableName);
+
+ // In order to update the descriptor, we need to retrieve the old descriptor for comparison.
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedHTableDescriptor == null) {
+ throw new IOException("HTableDescriptor missing for " + tableName);
+ }
+ if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
+ throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName()
+ + "' in table '" + tableName + "' already exists so cannot be added");
+ }
+ }
+
+ /**
+ * Action before adding column family.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preAdd(final MasterProcedureEnv env, final AddColumnFamilyState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Add the column family to the table descriptor on the file system.
+ */
+ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ // Update table descriptor
+ LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString());
+
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+
+ if (htd.hasFamily(cfDescriptor.getName())) {
+ // It is possible to reach this point: the column family may already have been
+ // added to the table descriptor before a master failover interrupted this state.
+ // Running this function multiple times must therefore be safe.
+ return;
+ }
+
+ htd.addFamily(cfDescriptor);
+ env.getMasterServices().getTableDescriptors().add(htd);
+ }
+
+ /**
+ * Restore the table descriptor back to pre-add
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (htd.hasFamily(cfDescriptor.getName())) {
+ // Remove the column family from file system and update the table descriptor to
+ // the before-add-column-family-state
+ MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName,
+ getRegionInfoList(env), cfDescriptor.getName());
+
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+
+ // Make sure regions are opened after table descriptor is updated.
+ reOpenAllRegionsIfTableIsOnline(env);
+ }
+ }
+
+ /**
+ * Action after adding column family.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postAdd(final MasterProcedureEnv env, final AddColumnFamilyState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Last action from the procedure - executed when online schema change is supported.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
+ // This operation only runs when the table is enabled.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ return;
+ }
+
+ if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) {
+ LOG.info("Completed add column family operation on table " + getTableName());
+ } else {
+ LOG.warn("Error on reopening the regions on table " + getTableName());
+ }
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ private String getColumnFamilyName() {
+ return cfDescriptor.getNameAsString();
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final AddColumnFamilyState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case ADD_COLUMN_FAMILY_PRE_OPERATION:
+ cpHost.preAddColumnHandler(tableName, cfDescriptor);
+ break;
+ case ADD_COLUMN_FAMILY_POST_OPERATION:
+ cpHost.postAddColumnHandler(tableName, cfDescriptor);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+
+ private List<HRegionInfo> getRegionInfoList(final MasterProcedureEnv env) throws IOException {
+ if (regionInfoList == null) {
+ regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ }
+ return regionInfoList;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
new file mode 100644
index 0000000..a053c89
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java
@@ -0,0 +1,441 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * The procedure to delete a column family from an existing table.
+ */
+@InterfaceAudience.Private
+public class DeleteColumnFamilyProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, DeleteColumnFamilyState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private HTableDescriptor unmodifiedHTableDescriptor;
+ private TableName tableName;
+ private byte [] familyName;
+ private UserGroupInformation user;
+
+ private List<HRegionInfo> regionInfoList;
+ private Boolean traceEnabled;
+
+ public DeleteColumnFamilyProcedure() {
+ this.unmodifiedHTableDescriptor = null;
+ this.regionInfoList = null;
+ this.traceEnabled = null;
+ }
+
+ public DeleteColumnFamilyProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final byte[] familyName) throws IOException {
+ this.tableName = tableName;
+ this.familyName = familyName;
+ this.user = env.getRequestUser().getUGI();
+ this.unmodifiedHTableDescriptor = null;
+ this.regionInfoList = null;
+ this.traceEnabled = null;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, DeleteColumnFamilyState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case DELETE_COLUMN_FAMILY_PREPARE:
+ prepareDelete(env);
+ setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_PRE_OPERATION);
+ break;
+ case DELETE_COLUMN_FAMILY_PRE_OPERATION:
+ preDelete(env, state);
+ setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR);
+ break;
+ case DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR:
+ updateTableDescriptor(env);
+ setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT);
+ break;
+ case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT:
+ deleteFromFs(env);
+ setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_POST_OPERATION);
+ break;
+ case DELETE_COLUMN_FAMILY_POST_OPERATION:
+ postDelete(env, state);
+ setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS);
+ break;
+ case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ reOpenAllRegionsIfTableIsOnline(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ if (!isRollbackSupported(state)) {
+ // We have reached a state that cannot be rolled back; just keep retrying.
+ LOG.warn("Error trying to delete the column family " + getColumnFamilyName()
+ + " from table " + tableName + " (in state=" + state + ")", e);
+ } else {
+ LOG.error("Error trying to delete the column family " + getColumnFamilyName()
+ + " from table " + tableName + " (in state=" + state + ")", e);
+ setFailure("master-delete-column-family", e);
+ }
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final DeleteColumnFamilyState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ break; // Nothing to undo.
+ case DELETE_COLUMN_FAMILY_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo?
+ break;
+ case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT:
+ // Once we reach this state we can NOT roll back, as it is tricky to undelete
+ // the deleted files. We are not supposed to reach here; throw an exception so
+ // that we know there is a code bug to investigate.
+ throw new UnsupportedOperationException(this + " rollback of state=" + state
+ + " is unsupported.");
+ case DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR:
+ restoreTableDescriptor(env);
+ break;
+ case DELETE_COLUMN_FAMILY_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo?
+ break;
+ case DELETE_COLUMN_FAMILY_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step " + state + " for deleting the column family"
+ + getColumnFamilyName() + " to the table " + tableName, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected DeleteColumnFamilyState getState(final int stateId) {
+ return DeleteColumnFamilyState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final DeleteColumnFamilyState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected DeleteColumnFamilyState getInitialState() {
+ return DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(DeleteColumnFamilyState state) {
+ if (aborted.get() && isRollbackSupported(state)) {
+ setAbortFailure("delete-columnfamily", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_DELETE_FAMILY.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DeleteColumnFamilyStateData.Builder deleteCFMsg =
+ MasterProcedureProtos.DeleteColumnFamilyStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setColumnfamilyName(ByteStringer.wrap(familyName));
+ if (unmodifiedHTableDescriptor != null) {
+ deleteCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert());
+ }
+
+ deleteCFMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+ MasterProcedureProtos.DeleteColumnFamilyStateData deleteCFMsg =
+ MasterProcedureProtos.DeleteColumnFamilyStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(deleteCFMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(deleteCFMsg.getTableName());
+ familyName = deleteCFMsg.getColumnfamilyName().toByteArray();
+
+ if (deleteCFMsg.hasUnmodifiedTableSchema()) {
+ unmodifiedHTableDescriptor = HTableDescriptor.convert(deleteCFMsg.getUnmodifiedTableSchema());
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(", columnfamily=");
+ if (familyName != null) {
+ sb.append(getColumnFamilyName());
+ } else {
+ sb.append("Unknown");
+ }
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Action before any real work of deleting the column family.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareDelete(final MasterProcedureEnv env) throws IOException {
+ // Checks whether the table is allowed to be modified.
+ MasterDDLOperationHelper.checkTableModifiable(env, tableName);
+
+ // In order to update the descriptor, we need to retrieve the old descriptor for comparison.
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedHTableDescriptor == null) {
+ throw new IOException("HTableDescriptor missing for " + tableName);
+ }
+ if (!unmodifiedHTableDescriptor.hasFamily(familyName)) {
+ throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ + "' does not exist, so it cannot be deleted");
+ }
+ }
+
+ /**
+ * Action before deleting column family.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preDelete(final MasterProcedureEnv env, final DeleteColumnFamilyState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Remove the column family from the table descriptor
+ */
+ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ // Update table descriptor
+ LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName());
+
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+
+ if (!htd.hasFamily(familyName)) {
+ // It is possible to reach this point if the column family was already deleted from the
+ // table descriptor but a master failover happened before this state completed.
+ // Running this function multiple times must therefore not cause problems.
+ return;
+ }
+
+ htd.removeFamily(familyName);
+ env.getMasterServices().getTableDescriptors().add(htd);
+ }
+
+ /**
+ * Restore the old table descriptor
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+
+ // Make sure regions are opened after table descriptor is updated.
+ reOpenAllRegionsIfTableIsOnline(env);
+ }
+
+ /**
+ * Remove the column family from the file system
+ **/
+ private void deleteFromFs(final MasterProcedureEnv env) throws IOException {
+ MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName,
+ getRegionInfoList(env), familyName);
+ }
+
+ /**
+ * Action after deleting column family.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postDelete(final MasterProcedureEnv env, final DeleteColumnFamilyState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Last action of the procedure; executed when online schema change is supported.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
+ // This operation only runs when the table is enabled.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ return;
+ }
+
+ if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) {
+ LOG.info("Completed delete column family operation on table " + getTableName());
+ } else {
+ LOG.warn("Error on reopening the regions on table " + getTableName());
+ }
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ private String getColumnFamilyName() {
+ return Bytes.toString(familyName);
+ }
+
+ /**
+ * Run the coprocessor action for the given procedure state.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env,
+ final DeleteColumnFamilyState state) throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case DELETE_COLUMN_FAMILY_PRE_OPERATION:
+ cpHost.preDeleteColumnHandler(tableName, familyName);
+ break;
+ case DELETE_COLUMN_FAMILY_POST_OPERATION:
+ cpHost.postDeleteColumnHandler(tableName, familyName);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+
+ /*
+ * Check whether we are in a state that can be rolled back
+ */
+ private boolean isRollbackSupported(final DeleteColumnFamilyState state) {
+ switch (state) {
+ case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ case DELETE_COLUMN_FAMILY_POST_OPERATION:
+ case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT:
+ // It is not safe to roll back once we have reached these states.
+ return false;
+ default:
+ break;
+ }
+ return true;
+ }
+
+ private List<HRegionInfo> getRegionInfoList(final MasterProcedureEnv env) throws IOException {
+ if (regionInfoList == null) {
+ regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ }
+ return regionInfoList;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
new file mode 100644
index 0000000..138ebd8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java
@@ -0,0 +1,384 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * The procedure to modify a column family of an existing table.
+ */
+@InterfaceAudience.Private
+public class ModifyColumnFamilyProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, ModifyColumnFamilyState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private TableName tableName;
+ private HTableDescriptor unmodifiedHTableDescriptor;
+ private HColumnDescriptor cfDescriptor;
+ private UserGroupInformation user;
+
+ private Boolean traceEnabled;
+
+ public ModifyColumnFamilyProcedure() {
+ this.unmodifiedHTableDescriptor = null;
+ this.traceEnabled = null;
+ }
+
+ public ModifyColumnFamilyProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final HColumnDescriptor cfDescriptor) throws IOException {
+ this.tableName = tableName;
+ this.cfDescriptor = cfDescriptor;
+ this.user = env.getRequestUser().getUGI();
+ this.unmodifiedHTableDescriptor = null;
+ this.traceEnabled = null;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env,
+ final ModifyColumnFamilyState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case MODIFY_COLUMN_FAMILY_PREPARE:
+ prepareModify(env);
+ setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_PRE_OPERATION);
+ break;
+ case MODIFY_COLUMN_FAMILY_PRE_OPERATION:
+ preModify(env, state);
+ setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR);
+ break;
+ case MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR:
+ updateTableDescriptor(env);
+ setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_POST_OPERATION);
+ break;
+ case MODIFY_COLUMN_FAMILY_POST_OPERATION:
+ postModify(env, state);
+ setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS);
+ break;
+ case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ reOpenAllRegionsIfTableIsOnline(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ LOG.warn("Error trying to modify the column family " + getColumnFamilyName()
+ + " of the table " + tableName + "(in state=" + state + ")", e);
+
+ setFailure("master-modify-columnfamily", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final ModifyColumnFamilyState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS:
+ break; // Nothing to undo.
+ case MODIFY_COLUMN_FAMILY_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo?
+ break;
+ case MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR:
+ restoreTableDescriptor(env);
+ break;
+ case MODIFY_COLUMN_FAMILY_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo?
+ break;
+ case MODIFY_COLUMN_FAMILY_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step " + state + " for adding the column family"
+ + getColumnFamilyName() + " to the table " + tableName, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected ModifyColumnFamilyState getState(final int stateId) {
+ return ModifyColumnFamilyState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final ModifyColumnFamilyState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected ModifyColumnFamilyState getInitialState() {
+ return ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(ModifyColumnFamilyState state) {
+ if (aborted.get()) {
+ setAbortFailure("modify-columnfamily", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_MODIFY_FAMILY.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.ModifyColumnFamilyStateData.Builder modifyCFMsg =
+ MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setColumnfamilySchema(cfDescriptor.convert());
+ if (unmodifiedHTableDescriptor != null) {
+ modifyCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert());
+ }
+
+ modifyCFMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.ModifyColumnFamilyStateData modifyCFMsg =
+ MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName());
+ cfDescriptor = HColumnDescriptor.convert(modifyCFMsg.getColumnfamilySchema());
+ if (modifyCFMsg.hasUnmodifiedTableSchema()) {
+ unmodifiedHTableDescriptor = HTableDescriptor.convert(modifyCFMsg.getUnmodifiedTableSchema());
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(", columnfamily=");
+ if (cfDescriptor != null) {
+ sb.append(getColumnFamilyName());
+ } else {
+ sb.append("Unknown");
+ }
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Action before any real work of modifying the column family.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareModify(final MasterProcedureEnv env) throws IOException {
+ // Checks whether the table is allowed to be modified.
+ MasterDDLOperationHelper.checkTableModifiable(env, tableName);
+
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
+ if (unmodifiedHTableDescriptor == null) {
+ throw new IOException("HTableDescriptor missing for " + tableName);
+ }
+ if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
+ throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ + "' does not exist, so it cannot be modified");
+ }
+ }
+
+ /**
+ * Action before modifying column family.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preModify(final MasterProcedureEnv env, final ModifyColumnFamilyState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Modify the column family in the table descriptor
+ */
+ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ // Update table descriptor
+ LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString());
+
+ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
+ htd.modifyFamily(cfDescriptor);
+ env.getMasterServices().getTableDescriptors().add(htd);
+ }
+
+ /**
+ * Restore the old table descriptor
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+
+ // Make sure regions are opened after table descriptor is updated.
+ reOpenAllRegionsIfTableIsOnline(env);
+ }
+
+ /**
+ * Action after modifying column family.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postModify(final MasterProcedureEnv env, final ModifyColumnFamilyState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Last action of the procedure; executed when online schema change is supported.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
+ // This operation only runs when the table is enabled.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ return;
+ }
+
+ List<HRegionInfo> regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), regionInfoList)) {
+ LOG.info("Completed add column family operation on table " + getTableName());
+ } else {
+ LOG.warn("Error on reopening the regions on table " + getTableName());
+ }
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ private String getColumnFamilyName() {
+ return cfDescriptor.getNameAsString();
+ }
+
+ /**
+ * Run the coprocessor action for the given procedure state.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env,
+ final ModifyColumnFamilyState state) throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case MODIFY_COLUMN_FAMILY_PRE_OPERATION:
+ cpHost.preModifyColumnHandler(tableName, cfDescriptor);
+ break;
+ case MODIFY_COLUMN_FAMILY_POST_OPERATION:
+ cpHost.postModifyColumnHandler(tableName, cfDescriptor);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index a19a975..44b9803 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -103,37 +103,6 @@ public class TestTableLockManager {
TEST_UTIL.shutdownMiniCluster();
}
- @Test(timeout = 600000)
- public void testLockTimeoutException() throws Exception {
- Configuration conf = TEST_UTIL.getConfiguration();
- conf.setInt(TableLockManager.TABLE_WRITE_LOCK_TIMEOUT_MS, 3000);
- prepareMiniCluster();
- HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
- master.getMasterCoprocessorHost().load(TestLockTimeoutExceptionMasterObserver.class,
- 0, TEST_UTIL.getConfiguration());
-
- ExecutorService executor = Executors.newSingleThreadExecutor();
- Future<Object> shouldFinish = executor.submit(new Callable<Object>() {
- @Override
- public Object call() throws Exception {
- Admin admin = TEST_UTIL.getHBaseAdmin();
- admin.deleteColumn(TABLE_NAME, FAMILY);
- return null;
- }
- });
-
- deleteColumn.await();
-
- try {
- Admin admin = TEST_UTIL.getHBaseAdmin();
- admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
- fail("Was expecting TableLockTimeoutException");
- } catch (LockTimeoutException ex) {
- //expected
- }
- shouldFinish.get();
- }
-
public static class TestLockTimeoutExceptionMasterObserver extends BaseMasterObserver {
@Override
public void preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java
index 5b2f4f6..b5c82e1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java
@@ -29,20 +29,22 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WALSplitter;
+import org.junit.After;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -64,10 +66,17 @@ public class TestTableDeleteFamilyHandler {
*/
@BeforeClass
public static void beforeAllTests() throws Exception {
-
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
TEST_UTIL.startMiniCluster(2);
+ }
+
+ @AfterClass
+ public static void afterAllTests() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+ @Before
+ public void setup() throws IOException, InterruptedException {
// Create a table of three families. This will assign a region.
TEST_UTIL.createTable(TABLENAME, FAMILIES);
Table t = TEST_UTIL.getConnection().getTable(TABLENAME);
@@ -86,22 +95,17 @@ public class TestTableDeleteFamilyHandler {
TEST_UTIL.flush();
t.close();
- }
- @AfterClass
- public static void afterAllTests() throws Exception {
- TEST_UTIL.deleteTable(TABLENAME);
- TEST_UTIL.shutdownMiniCluster();
+ TEST_UTIL.ensureSomeRegionServersAvailable(2);
}
- @Before
- public void setup() throws IOException, InterruptedException {
- TEST_UTIL.ensureSomeRegionServersAvailable(2);
+ @After
+ public void cleanup() throws Exception {
+ TEST_UTIL.deleteTable(TABLENAME);
}
@Test
public void deleteColumnFamilyWithMultipleRegions() throws Exception {
-
Admin admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);
@@ -114,7 +118,6 @@ public class TestTableDeleteFamilyHandler {
assertEquals(3, beforehtd.getColumnFamilies().length);
HColumnDescriptor[] families = beforehtd.getColumnFamilies();
for (int i = 0; i < families.length; i++) {
-
assertTrue(families[i].getNameAsString().equals("cf" + (i + 1)));
}
@@ -179,4 +182,95 @@ public class TestTableDeleteFamilyHandler {
}
}
+ @Test
+ public void deleteColumnFamilyTwice() throws Exception {
+
+ Admin admin = TEST_UTIL.getHBaseAdmin();
+ HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);
+ String cfToDelete = "cf1";
+
+ FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
+
+ // 1 - Check if table exists in descriptor
+ assertTrue(admin.isTableAvailable(TABLENAME));
+
+ // 2 - Check that the target column family exists in the descriptor
+ HColumnDescriptor[] families = beforehtd.getColumnFamilies();
+ boolean foundCF = false;
+ int i;
+ for (i = 0; i < families.length; i++) {
+ if (families[i].getNameAsString().equals(cfToDelete)) {
+ foundCF = true;
+ break;
+ }
+ }
+ assertTrue(foundCF);
+
+ // 3 - Check if table exists in FS
+ Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME);
+ assertTrue(fs.exists(tableDir));
+
+ // 4 - Check that the target column family exists in the FS
+ FileStatus[] fileStatus = fs.listStatus(tableDir);
+ foundCF = false;
+ for (i = 0; i < fileStatus.length; i++) {
+ if (fileStatus[i].isDirectory()) {
+ FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {
+ @Override
+ public boolean accept(Path p) {
+ if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) {
+ return false;
+ }
+ return true;
+ }
+ });
+ for (int j = 0; j < cf.length; j++) {
+ if (cf[j].isDirectory() == true && cf[j].getPath().getName().equals(cfToDelete)) {
+ foundCF = true;
+ break;
+ }
+ }
+ }
+ if (foundCF) {
+ break;
+ }
+ }
+ assertTrue(foundCF);
+
+ // TEST - Disable and delete the column family
+ if (admin.isTableEnabled(TABLENAME)) {
+ admin.disableTable(TABLENAME);
+ }
+ admin.deleteColumn(TABLENAME, Bytes.toBytes(cfToDelete));
+
+ // 5 - Check if the target column family is gone from the FS
+ fileStatus = fs.listStatus(tableDir);
+ for (i = 0; i < fileStatus.length; i++) {
+ if (fileStatus[i].isDirectory()) {
+ FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {
+ @Override
+ public boolean accept(Path p) {
+ if (WALSplitter.isSequenceIdFile(p)) {
+ return false;
+ }
+ return true;
+ }
+ });
+ for (int j = 0; j < cf.length; j++) {
+ if (cf[j].isDirectory()) {
+ assertFalse(cf[j].getPath().getName().equals(cfToDelete));
+ }
+ }
+ }
+ }
+
+ try {
+ // Test: delete again
+ admin.deleteColumn(TABLENAME, Bytes.toBytes(cfToDelete));
+ Assert.fail("Delete a non-exist column family should fail");
+ } catch (InvalidFamilyOperationException e) {
+ // Expected.
+ }
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
index 0d51875..c4772ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -52,7 +54,7 @@ import org.junit.rules.TestName;
*/
@Category({MasterTests.class, LargeTests.class})
public class TestTableDescriptorModification {
-
+
@Rule public TestName name = new TestName();
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static TableName TABLE_NAME = null;
@@ -74,7 +76,7 @@ public class TestTableDescriptorModification {
TABLE_NAME = TableName.valueOf(name.getMethodName());
}
-
+
@AfterClass
public static void afterAllTests() throws Exception {
TEST_UTIL.shutdownMiniCluster();
@@ -124,6 +126,95 @@ public class TestTableDescriptorModification {
}
@Test
+ public void testAddSameColumnFamilyTwice() throws IOException {
+ Admin admin = TEST_UTIL.getHBaseAdmin();
+ // Create a table with one family
+ HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+ baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+ admin.createTable(baseHtd);
+ admin.disableTable(TABLE_NAME);
+ try {
+ // Verify the table descriptor
+ verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+
+ // Modify the table adding one family and verify the descriptor
+ admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1));
+ verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
+
+ try {
+ // Add same column family again - expect failure
+ admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1));
+ Assert.fail("Delete a non-exist column family should fail");
+ } catch (InvalidFamilyOperationException e) {
+ // Expected.
+ }
+
+ } finally {
+ admin.deleteTable(TABLE_NAME);
+ }
+ }
+
+ @Test
+ public void testModifyColumnFamily() throws IOException {
+ Admin admin = TEST_UTIL.getHBaseAdmin();
+
+ HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_0);
+ int blockSize = cfDescriptor.getBlocksize();
+ // Create a table with one family
+ HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+ baseHtd.addFamily(cfDescriptor);
+ admin.createTable(baseHtd);
+ admin.disableTable(TABLE_NAME);
+ try {
+ // Verify the table descriptor
+ verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+
+ int newBlockSize = 2 * blockSize;
+ cfDescriptor.setBlocksize(newBlockSize);
+
+ // Modify column family
+ admin.modifyColumn(TABLE_NAME, cfDescriptor);
+
+ HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME);
+ HColumnDescriptor hcfd = htd.getFamily(FAMILY_0);
+ assertTrue(hcfd.getBlocksize() == newBlockSize);
+ } finally {
+ admin.deleteTable(TABLE_NAME);
+ }
+ }
+
+ @Test
+ public void testModifyNonExistingColumnFamily() throws IOException {
+ Admin admin = TEST_UTIL.getHBaseAdmin();
+
+ HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_1);
+ int blockSize = cfDescriptor.getBlocksize();
+ // Create a table with one family
+ HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+ baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+ admin.createTable(baseHtd);
+ admin.disableTable(TABLE_NAME);
+ try {
+ // Verify the table descriptor
+ verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+
+ int newBlockSize = 2 * blockSize;
+ cfDescriptor.setBlocksize(newBlockSize);
+
+ // Modify a column family that is not in the table.
+ try {
+ admin.modifyColumn(TABLE_NAME, cfDescriptor);
+ Assert.fail("Modify a non-exist column family should fail");
+ } catch (InvalidFamilyOperationException e) {
+ // Expected.
+ }
+
+ } finally {
+ admin.deleteTable(TABLE_NAME);
+ }
+ }
+
+ @Test
public void testDeleteColumn() throws IOException {
Admin admin = TEST_UTIL.getHBaseAdmin();
// Create a table with two families
@@ -144,6 +235,35 @@ public class TestTableDescriptorModification {
}
}
+ @Test
+ public void testDeleteSameColumnFamilyTwice() throws IOException {
+ Admin admin = TEST_UTIL.getHBaseAdmin();
+ // Create a table with two families
+ HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
+ baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
+ baseHtd.addFamily(new HColumnDescriptor(FAMILY_1));
+ admin.createTable(baseHtd);
+ admin.disableTable(TABLE_NAME);
+ try {
+ // Verify the table descriptor
+ verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
+
+ // Modify the table removing one family and verify the descriptor
+ admin.deleteColumn(TABLE_NAME, FAMILY_1);
+ verifyTableDescriptor(TABLE_NAME, FAMILY_0);
+
+ try {
+ // Delete again - expect failure
+ admin.deleteColumn(TABLE_NAME, FAMILY_1);
+ Assert.fail("Delete a non-exist column family should fail");
+ } catch (Exception e) {
+ // Expected.
+ }
+ } finally {
+ admin.deleteTable(TABLE_NAME);
+ }
+ }
+
private void verifyTableDescriptor(final TableName tableName,
final byte[]... families) throws IOException {
Admin admin = TEST_UTIL.getHBaseAdmin();
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index d6c19e1..bc97bb9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -295,6 +295,40 @@ public class MasterProcedureTestingUtility {
ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId));
}
+ public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName,
+ final String family) throws IOException {
+ TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
+ assertTrue(htd != null);
+
+ assertTrue(htd.getHTableDescriptor().hasFamily(family.getBytes()));
+ }
+
+ public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
+ final String family) throws IOException {
+ // verify htd
+ TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
+ assertTrue(htd != null);
+ assertFalse(htd.getHTableDescriptor().hasFamily(family.getBytes()));
+
+ // verify fs
+ final FileSystem fs = master.getMasterFileSystem().getFileSystem();
+ final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+ for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
+ final Path familyDir = new Path(regionDir, family);
+ assertFalse(family + " family dir should not exist", fs.exists(familyDir));
+ }
+ }
+
+ public static void validateColumnFamilyModification(final HMaster master,
+ final TableName tableName, final String family, HColumnDescriptor columnDescriptor)
+ throws IOException {
+ TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName);
+ assertTrue(htd != null);
+
+ HColumnDescriptor hcfd = htd.getHTableDescriptor().getFamily(family.getBytes());
+ assertTrue(hcfd.equals(columnDescriptor));
+ }
+
public static class InjectAbortOnLoadListener
implements ProcedureExecutor.ProcedureExecutorListener {
private final ProcedureExecutor<MasterProcedureEnv> procExec;
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java
new file mode 100644
index 0000000..1490aa1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java
@@ -0,0 +1,246 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestAddColumnFamilyProcedure {
+ private static final Log LOG = LogFactory.getLog(TestAddColumnFamilyProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testAddColumnFamily() throws Exception {
+ final TableName tableName = TableName.valueOf("testAddColumnFamily");
+ final String cf1 = "cf1";
+ final String cf2 = "cf2";
+ final HColumnDescriptor columnDescriptor1 = new HColumnDescriptor(cf1);
+ final HColumnDescriptor columnDescriptor2 = new HColumnDescriptor(cf2);
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f3");
+
+ // Test 1: Add a column family online
+ long procId1 =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor1));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf1);
+
+ // Test 2: Add a column family offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ long procId2 =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor2));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+ MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf2);
+ }
+
+ @Test(timeout=60000)
+ public void testAddSameColumnFamilyTwice() throws Exception {
+ final TableName tableName = TableName.valueOf("testAddColumnFamilyTwice");
+ final String cf2 = "cf2";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2);
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");
+
+ // add the column family
+ long procId1 =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+ MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf2);
+
+ // add the column family that exists
+ long procId2 =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+
+ // Second add should fail with InvalidFamilyOperationException
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Add failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+
+ // Try to add the existing column family again, this time offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ long procId3 =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId3);
+
+ // The offline add should also fail with InvalidFamilyOperationException
+ result = procExec.getResult(procId3);
+ assertTrue(result.isFailed());
+ LOG.debug("Add failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
+ final String cf4 = "cf4";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4);
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the AddColumnFamily procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = AddColumnFamilyState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ AddColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf4);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+ final String cf5 = "cf5";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf5);
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3");
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the AddColumnFamily procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = AddColumnFamilyState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ AddColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf5);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+ final String cf6 = "cf6";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf6);
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the AddColumnFamily procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName,
+ columnDescriptor));
+
+ int numberOfSteps = AddColumnFamilyState.values().length - 2; // failing in the middle of proc
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps,
+ AddColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf6);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
[50/50] [abbrv] hbase git commit: Merge branch 'apache/master'
(4/16/15) into hbase-11339
Posted by jm...@apache.org.
Merge branch 'apache/master' (4/16/15) into hbase-11339
API conflicts and test fixes
Update LoadTestTool.COLUMN_FAMILY -> DEFAULT_COLUMN_FAMILY due to HBASE-11842
Use new 1.0+ API in some tests
Use updated internal scanner API
Fix to take into account HBASE-13203 - procedure v2 table delete
Conflicts:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e20bbf6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e20bbf6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e20bbf6
Branch: refs/heads/hbase-11339
Commit: 0e20bbf6a3f1b8390c040230cc8b28e777c1d1f2
Parents: eba8a70 ddab472
Author: Jonathan M Hsieh <jm...@apache.org>
Authored: Fri May 1 08:18:51 2015 -0700
Committer: Jonathan M Hsieh <jm...@apache.org>
Committed: Fri May 1 08:18:51 2015 -0700
----------------------------------------------------------------------
.gitignore | 1 +
bin/hbase | 22 +-
bin/hbase-cleanup.sh | 12 +-
bin/hbase-config.sh | 6 +-
bin/hbase.cmd | 7 +-
bin/local-master-backup.sh | 8 +-
bin/local-regionservers.sh | 8 +-
bin/region_mover.rb | 6 +-
bin/region_status.rb | 8 +-
conf/hbase-env.cmd | 5 +-
conf/hbase-env.sh | 12 +-
conf/log4j.properties | 6 +-
dev-support/check_compatibility.sh | 16 +-
dev-support/checkstyle_report.py | 0
dev-support/findHangingTests.py | 0
dev-support/jenkinsEnv.sh | 2 +-
dev-support/publish_hbase_website.sh | 32 +-
dev-support/test-patch.properties | 9 +-
dev-support/test-patch.sh | 167 +-
hbase-annotations/src/main/asciidoc/.gitignore | 0
hbase-assembly/src/main/asciidoc/.gitignore | 0
hbase-checkstyle/src/main/asciidoc/.gitignore | 0
hbase-client/pom.xml | 49 +-
hbase-client/src/main/asciidoc/.gitignore | 0
.../org/apache/hadoop/hbase/ClusterStatus.java | 67 +-
.../hadoop/hbase/CoprocessorEnvironment.java | 6 +-
.../apache/hadoop/hbase/HColumnDescriptor.java | 26 +-
.../apache/hadoop/hbase/HTableDescriptor.java | 36 +
.../apache/hadoop/hbase/MetaTableAccessor.java | 451 +-
.../org/apache/hadoop/hbase/RegionLoad.java | 10 +
.../apache/hadoop/hbase/RegionLocations.java | 28 +
.../org/apache/hadoop/hbase/ServerLoad.java | 27 +-
.../hbase/client/AbstractClientScanner.java | 15 +-
.../org/apache/hadoop/hbase/client/Admin.java | 7 +
.../hadoop/hbase/client/AsyncProcess.java | 6 +-
.../hadoop/hbase/client/BufferedMutator.java | 4 +-
.../hbase/client/BufferedMutatorImpl.java | 2 +-
.../hadoop/hbase/client/ClientScanner.java | 460 +-
.../client/ClientSmallReversedScanner.java | 182 +-
.../hadoop/hbase/client/ClientSmallScanner.java | 228 +-
.../hadoop/hbase/client/ClusterConnection.java | 8 +-
.../apache/hadoop/hbase/client/Connection.java | 13 +-
.../hadoop/hbase/client/ConnectionAdapter.java | 7 +-
.../hadoop/hbase/client/ConnectionFactory.java | 15 +-
.../hbase/client/ConnectionImplementation.java | 2248 +++
.../hadoop/hbase/client/ConnectionManager.java | 2644 ---
.../hadoop/hbase/client/ConnectionUtils.java | 9 +-
.../org/apache/hadoop/hbase/client/Delete.java | 4 +-
.../hbase/client/FlushRegionCallable.java | 102 +
.../org/apache/hadoop/hbase/client/Get.java | 1 +
.../apache/hadoop/hbase/client/HBaseAdmin.java | 890 +-
.../apache/hadoop/hbase/client/HConnection.java | 38 +-
.../hadoop/hbase/client/HConnectionKey.java | 146 -
.../hadoop/hbase/client/HConnectionManager.java | 324 -
.../hadoop/hbase/client/HRegionLocator.java | 27 +-
.../org/apache/hadoop/hbase/client/HTable.java | 198 +-
.../hadoop/hbase/client/HTableFactory.java | 51 -
.../hbase/client/HTableInterfaceFactory.java | 54 -
.../apache/hadoop/hbase/client/Increment.java | 20 +
.../apache/hadoop/hbase/client/MetaScanner.java | 425 -
.../apache/hadoop/hbase/client/MultiAction.java | 2 +-
.../org/apache/hadoop/hbase/client/Put.java | 46 +-
.../client/RegionAdminServiceCallable.java | 63 +-
.../hadoop/hbase/client/RegionReplicaUtil.java | 56 +
.../org/apache/hadoop/hbase/client/Result.java | 152 +-
.../RetryingCallerInterceptorFactory.java | 2 +-
.../hbase/client/ReversedClientScanner.java | 33 +-
.../hbase/client/ReversedScannerCallable.java | 2 -
.../hbase/client/RpcRetryingCallerImpl.java | 11 +-
.../org/apache/hadoop/hbase/client/Scan.java | 93 +-
.../hadoop/hbase/client/ScannerCallable.java | 46 +-
.../client/ScannerCallableWithReplicas.java | 54 +-
.../org/apache/hadoop/hbase/client/Table.java | 5 +
.../client/ZooKeeperKeepAliveConnection.java | 4 +-
.../hadoop/hbase/client/ZooKeeperRegistry.java | 8 +-
.../hbase/client/metrics/ScanMetrics.java | 11 +-
.../client/replication/ReplicationAdmin.java | 240 +-
.../exceptions/ConnectionClosingException.java | 2 +-
.../exceptions/PreemptiveFastFailException.java | 2 +-
.../apache/hadoop/hbase/executor/EventType.java | 31 +-
.../hadoop/hbase/executor/ExecutorType.java | 3 +-
.../hadoop/hbase/filter/ColumnRangeFilter.java | 2 +-
.../hadoop/hbase/filter/FamilyFilter.java | 2 +-
.../org/apache/hadoop/hbase/ipc/AsyncCall.java | 9 +-
.../hadoop/hbase/ipc/AsyncRpcChannel.java | 316 +-
.../apache/hadoop/hbase/ipc/AsyncRpcClient.java | 174 +-
.../hbase/ipc/AsyncServerResponseHandler.java | 16 +-
.../org/apache/hadoop/hbase/ipc/IPCUtil.java | 53 +-
.../apache/hadoop/hbase/ipc/RpcClientImpl.java | 7 +-
.../hadoop/hbase/protobuf/ProtobufUtil.java | 72 +-
.../hadoop/hbase/protobuf/RequestConverter.java | 36 +
.../hbase/protobuf/ResponseConverter.java | 12 +-
.../apache/hadoop/hbase/protobuf/package.html | 2 +-
.../hbase/quotas/ThrottlingException.java | 11 +-
.../hbase/replication/ReplicationLoadSink.java | 36 +
.../replication/ReplicationLoadSource.java | 53 +
.../hadoop/hbase/security/EncryptionUtil.java | 47 +-
.../hbase/security/SaslClientHandler.java | 59 +-
.../security/access/AccessControlClient.java | 121 +-
.../hbase/snapshot/SnapshotExistsException.java | 3 +
.../apache/hadoop/hbase/zookeeper/ZKConfig.java | 103 +-
.../apache/hadoop/hbase/zookeeper/ZKUtil.java | 107 +-
.../hadoop/hbase/zookeeper/ZkAclReset.java | 110 +
.../hbase/zookeeper/ZooKeeperWatcher.java | 7 +
.../hadoop/hbase/TestRegionLocations.java | 57 +-
.../hadoop/hbase/client/TestAsyncProcess.java | 4 +-
.../client/TestClientExponentialBackoff.java | 4 +
.../hbase/client/TestClientNoCluster.java | 57 +-
.../hadoop/hbase/client/TestClientScanner.java | 489 +
.../client/TestClientSmallReversedScanner.java | 349 +
.../hbase/client/TestClientSmallScanner.java | 339 +
.../org/apache/hadoop/hbase/client/TestGet.java | 7 +
.../hbase/client/TestProcedureFuture.java | 186 +
.../hbase/client/TestSnapshotFromAdmin.java | 8 +-
.../hadoop/hbase/filter/TestLongComparator.java | 4 +
.../hbase/security/TestEncryptionUtil.java | 49 +-
.../src/test/resources/log4j.properties | 2 +-
hbase-common/pom.xml | 49 +-
hbase-common/src/main/asciidoc/.gitignore | 0
.../java/org/apache/hadoop/hbase/AuthUtil.java | 6 +-
.../org/apache/hadoop/hbase/CellComparator.java | 4 +-
.../java/org/apache/hadoop/hbase/CellUtil.java | 19 +
.../org/apache/hadoop/hbase/ChoreService.java | 4 +-
.../apache/hadoop/hbase/HBaseConfiguration.java | 2 +-
.../org/apache/hadoop/hbase/HConstants.java | 51 +-
.../java/org/apache/hadoop/hbase/KeyValue.java | 163 +-
.../org/apache/hadoop/hbase/KeyValueUtil.java | 2 +-
.../org/apache/hadoop/hbase/ScheduledChore.java | 45 +-
.../java/org/apache/hadoop/hbase/Stoppable.java | 4 +-
.../java/org/apache/hadoop/hbase/TableName.java | 38 +-
.../hbase/exceptions/TimeoutIOException.java | 46 +
.../hadoop/hbase/io/BoundedByteBufferPool.java | 113 +
.../hadoop/hbase/io/ByteBufferOutputStream.java | 32 +-
.../org/apache/hadoop/hbase/io/TimeRange.java | 1 +
.../hadoop/hbase/io/crypto/Encryption.java | 31 +-
.../io/encoding/BufferedDataBlockEncoder.java | 5 -
.../hbase/io/encoding/DataBlockEncoder.java | 21 -
.../hadoop/hbase/io/util/StreamUtils.java | 12 +-
.../org/apache/hadoop/hbase/security/User.java | 2 +
.../org/apache/hadoop/hbase/util/Bytes.java | 16 +-
.../hadoop/hbase/util/ForeignExceptionUtil.java | 109 +
.../java/org/apache/hadoop/hbase/util/Hash.java | 2 +-
.../apache/hadoop/hbase/util/JenkinsHash.java | 2 +-
.../apache/hadoop/hbase/util/MurmurHash.java | 2 +-
.../apache/hadoop/hbase/util/MurmurHash3.java | 2 +-
.../apache/hadoop/hbase/util/PrettyPrinter.java | 2 +-
.../apache/hadoop/hbase/util/RetryCounter.java | 4 +-
.../org/apache/hadoop/hbase/util/Threads.java | 92 +-
.../src/main/resources/hbase-default.xml | 77 +-
.../apache/hadoop/hbase/TestChoreService.java | 71 +-
.../org/apache/hadoop/hbase/TestKeyValue.java | 235 +
.../hbase/io/TestBoundedByteBufferPool.java | 88 +
.../hbase/io/crypto/TestCipherProvider.java | 8 +-
.../hadoop/hbase/io/crypto/TestEncryption.java | 7 +-
.../org/apache/hadoop/hbase/util/TestBytes.java | 14 +
.../src/test/resources/log4j.properties | 2 +-
hbase-examples/pom.xml | 64 +-
hbase-examples/src/main/asciidoc/.gitignore | 0
.../coprocessor/example/BulkDeleteEndpoint.java | 13 +-
.../hadoop/hbase/thrift/HttpDoAsClient.java | 27 +-
hbase-hadoop-compat/pom.xml | 51 +-
.../src/main/asciidoc/.gitignore | 0
.../hbase/ipc/MetricsHBaseServerSource.java | 6 +-
.../regionserver/MetricsRegionServerSource.java | 12 +-
.../MetricsRegionServerWrapper.java | 6 +-
.../MetricsReplicationSinkSource.java | 1 +
.../MetricsReplicationSourceSource.java | 1 +
hbase-hadoop2-compat/pom.xml | 40 +-
.../src/main/asciidoc/.gitignore | 0
.../hbase/ipc/MetricsHBaseServerSourceImpl.java | 11 +-
.../hbase/master/MetricsMasterSourceImpl.java | 3 +-
.../MetricsRegionAggregateSourceImpl.java | 3 +-
.../MetricsRegionServerSourceImpl.java | 15 +-
.../MetricsReplicationGlobalSourceSource.java | 5 +
.../MetricsReplicationSinkSourceImpl.java | 5 +
.../MetricsReplicationSourceSourceImpl.java | 5 +
hbase-it/pom.xml | 63 +-
hbase-it/src/main/asciidoc/.gitignore | 0
.../hadoop/hbase/IntegrationTestBase.java | 15 +-
.../hadoop/hbase/IntegrationTestIngest.java | 35 +-
.../IntegrationTestIngestStripeCompactions.java | 11 +-
.../IntegrationTestIngestWithEncryption.java | 8 +-
.../hbase/IntegrationTestIngestWithMOB.java | 9 +-
...IntegrationTestRegionReplicaReplication.java | 226 +
.../hadoop/hbase/RESTApiClusterManager.java | 350 +
.../hbase/chaos/actions/RemoveColumnAction.java | 2 +-
.../hadoop/hbase/mttr/IntegrationTestMTTR.java | 17 +-
.../test/IntegrationTestBigLinkedList.java | 228 +-
...egrationTestBigLinkedListWithVisibility.java | 11 +-
.../hbase/test/IntegrationTestReplication.java | 417 +
hbase-prefix-tree/pom.xml | 49 +-
hbase-prefix-tree/src/main/asciidoc/.gitignore | 0
.../codec/prefixtree/PrefixTreeSeeker.java | 29 -
hbase-procedure/pom.xml | 181 +
.../hbase/procedure2/OnePhaseProcedure.java | 28 +
.../hadoop/hbase/procedure2/Procedure.java | 680 +
.../procedure2/ProcedureAbortedException.java | 42 +
.../hbase/procedure2/ProcedureException.java | 45 +
.../hbase/procedure2/ProcedureExecutor.java | 1077 ++
.../procedure2/ProcedureFairRunQueues.java | 174 +
.../hbase/procedure2/ProcedureResult.java | 95 +
.../hbase/procedure2/ProcedureRunnableSet.java | 78 +
.../procedure2/ProcedureSimpleRunQueue.java | 121 +
.../procedure2/ProcedureYieldException.java | 40 +
.../procedure2/RemoteProcedureException.java | 116 +
.../hbase/procedure2/RootProcedureState.java | 185 +
.../hbase/procedure2/SequentialProcedure.java | 81 +
.../hbase/procedure2/StateMachineProcedure.java | 166 +
.../hbase/procedure2/TwoPhaseProcedure.java | 28 +
.../hbase/procedure2/store/ProcedureStore.java | 121 +
.../procedure2/store/ProcedureStoreTracker.java | 548 +
.../CorruptedWALProcedureStoreException.java | 43 +
.../procedure2/store/wal/ProcedureWALFile.java | 152 +
.../store/wal/ProcedureWALFormat.java | 234 +
.../store/wal/ProcedureWALFormatReader.java | 166 +
.../procedure2/store/wal/WALProcedureStore.java | 721 +
.../hadoop/hbase/procedure2/util/ByteSlot.java | 111 +
.../hbase/procedure2/util/StringUtils.java | 80 +
.../procedure2/util/TimeoutBlockingQueue.java | 217 +
.../procedure2/ProcedureTestingUtility.java | 163 +
.../procedure2/TestProcedureExecution.java | 338 +
.../procedure2/TestProcedureFairRunQueues.java | 155 +
.../hbase/procedure2/TestProcedureRecovery.java | 488 +
.../procedure2/TestProcedureReplayOrder.java | 226 +
.../store/TestProcedureStoreTracker.java | 193 +
.../store/wal/TestWALProcedureStore.java | 267 +
.../util/TestTimeoutBlockingQueue.java | 137 +
hbase-protocol/README.txt | 2 +-
hbase-protocol/pom.xml | 2 +
hbase-protocol/src/main/asciidoc/.gitignore | 0
.../hbase/protobuf/generated/AdminProtos.java | 1557 +-
.../hbase/protobuf/generated/ClientProtos.java | 870 +-
.../protobuf/generated/ClusterStatusProtos.java | 10280 +++++++----
.../generated/MasterProcedureProtos.java | 11424 ++++++++++++
.../hbase/protobuf/generated/MasterProtos.java | 15506 ++++++++++-------
.../protobuf/generated/ProcedureProtos.java | 7219 ++++++++
.../hbase/protobuf/generated/RPCProtos.java | 1664 +-
.../generated/RegionServerStatusProtos.java | 809 +-
.../hbase/protobuf/generated/WALProtos.java | 317 +-
.../protobuf/generated/ZooKeeperProtos.java | 1417 +-
hbase-protocol/src/main/protobuf/Admin.proto | 13 +
hbase-protocol/src/main/protobuf/Client.proto | 22 +
.../src/main/protobuf/ClusterStatus.proto | 43 +
hbase-protocol/src/main/protobuf/Master.proto | 39 +
.../src/main/protobuf/MasterProcedure.proto | 185 +
.../src/main/protobuf/Procedure.proto | 114 +
hbase-protocol/src/main/protobuf/RPC.proto | 11 +
.../src/main/protobuf/RegionServerStatus.proto | 8 +-
hbase-protocol/src/main/protobuf/WAL.proto | 3 +
.../src/main/protobuf/ZooKeeper.proto | 17 -
hbase-rest/pom.xml | 30 +
hbase-rest/src/main/asciidoc/.gitignore | 0
.../apache/hadoop/hbase/rest/RESTServer.java | 6 +-
.../apache/hadoop/hbase/rest/RESTServlet.java | 4 +-
.../hadoop/hbase/rest/RESTServletContainer.java | 3 +-
.../hadoop/hbase/rest/RegionsResource.java | 6 +-
.../apache/hadoop/hbase/rest/RowResource.java | 4 +-
.../org/apache/hadoop/hbase/rest/RowSpec.java | 8 +-
.../hadoop/hbase/rest/SchemaResource.java | 27 +-
.../apache/hadoop/hbase/rest/TableResource.java | 3 +-
.../hadoop/hbase/rest/client/RemoteHTable.java | 79 +-
.../hbase/rest/model/ColumnSchemaModel.java | 10 +-
.../hadoop/hbase/rest/model/ScannerModel.java | 2 +-
.../rest/model/StorageClusterStatusModel.java | 4 +-
.../hbase/rest/model/TableRegionModel.java | 2 +-
.../hbase/rest/model/TableSchemaModel.java | 16 +-
.../hadoop/hbase/rest/RowResourceBase.java | 1 -
.../hadoop/hbase/rest/TestGzipFilter.java | 1 -
.../hbase/rest/TestScannersWithFilters.java | 1 -
.../hadoop/hbase/rest/TestStatusResource.java | 1 -
.../hadoop/hbase/rest/TestTableResource.java | 54 +-
hbase-rest/src/test/resources/log4j.properties | 2 +-
hbase-server/pom.xml | 71 +-
hbase-server/src/main/asciidoc/.gitignore | 0
.../hbase/tmpl/master/MasterStatusTmpl.jamon | 23 +-
.../tmpl/master/RegionServerListTmpl.jamon | 1 -
.../tmpl/regionserver/BlockCacheTmpl.jamon | 62 +-
.../tmpl/regionserver/RegionListTmpl.jamon | 4 +-
.../tmpl/regionserver/ServerMetricsTmpl.jamon | 2 +
.../apache/hadoop/hbase/LocalHBaseCluster.java | 17 +-
.../java/org/apache/hadoop/hbase/Server.java | 4 +-
.../hbase/client/ClientSideRegionScanner.java | 5 +-
.../hbase/client/CoprocessorHConnection.java | 13 +-
.../hadoop/hbase/client/HTableWrapper.java | 57 +-
.../hbase/client/TableSnapshotScanner.java | 4 +-
.../SplitLogWorkerCoordination.java | 4 +-
.../ZKSplitLogManagerCoordination.java | 32 +-
.../ZkSplitLogWorkerCoordination.java | 20 +-
.../coprocessor/AggregateImplementation.java | 10 +-
.../hbase/coprocessor/BaseRegionObserver.java | 6 +-
.../coprocessor/BaseRegionServerObserver.java | 18 +-
.../coprocessor/BaseRowProcessorEndpoint.java | 8 +-
.../hbase/coprocessor/CoprocessorHost.java | 23 +-
.../RegionCoprocessorEnvironment.java | 4 +-
.../hbase/coprocessor/RegionObserver.java | 14 +-
.../hbase/coprocessor/RegionServerObserver.java | 14 +-
.../org/apache/hadoop/hbase/io/FileLink.java | 23 +-
.../hadoop/hbase/io/HalfStoreFileReader.java | 41 +-
.../hbase/io/hfile/AbstractHFileReader.java | 352 -
.../hbase/io/hfile/AbstractHFileWriter.java | 266 -
.../hbase/io/hfile/BlockWithScanInfo.java | 8 +-
.../hadoop/hbase/io/hfile/CacheConfig.java | 82 +-
.../hbase/io/hfile/CombinedBlockCache.java | 47 +-
.../hadoop/hbase/io/hfile/FixedFileTrailer.java | 6 +-
.../org/apache/hadoop/hbase/io/hfile/HFile.java | 83 +-
.../hadoop/hbase/io/hfile/HFileBlock.java | 153 +-
.../hadoop/hbase/io/hfile/HFileBlockIndex.java | 10 +-
.../hbase/io/hfile/HFilePrettyPrinter.java | 2 +-
.../hadoop/hbase/io/hfile/HFileReaderImpl.java | 1648 ++
.../hadoop/hbase/io/hfile/HFileReaderV2.java | 1318 --
.../hadoop/hbase/io/hfile/HFileReaderV3.java | 358 -
.../hadoop/hbase/io/hfile/HFileScanner.java | 91 +-
.../hbase/io/hfile/HFileWriterFactory.java | 40 +
.../hadoop/hbase/io/hfile/HFileWriterImpl.java | 641 +
.../hadoop/hbase/io/hfile/HFileWriterV2.java | 424 -
.../hadoop/hbase/io/hfile/HFileWriterV3.java | 136 -
.../io/hfile/InclusiveCombinedBlockCache.java | 58 +
.../hadoop/hbase/io/hfile/LruBlockCache.java | 37 +-
.../hbase/io/hfile/MemcachedBlockCache.java | 272 +
.../hbase/io/hfile/bucket/BucketCache.java | 216 +-
.../hbase/io/hfile/bucket/CachedEntryQueue.java | 20 +-
.../org/apache/hadoop/hbase/ipc/CallRunner.java | 15 +-
.../hadoop/hbase/ipc/MetricsHBaseServer.java | 4 +
.../apache/hadoop/hbase/ipc/RequestContext.java | 153 -
.../apache/hadoop/hbase/ipc/RpcCallContext.java | 27 +
.../org/apache/hadoop/hbase/ipc/RpcServer.java | 141 +-
.../hadoop/hbase/mapred/TableInputFormat.java | 18 +-
.../hbase/mapred/TableInputFormatBase.java | 189 +-
.../hbase/mapreduce/HFileOutputFormat2.java | 16 +-
.../hbase/mapreduce/LoadIncrementalHFiles.java | 196 +-
.../hbase/mapreduce/TableInputFormat.java | 4 +-
.../hbase/mapreduce/TableInputFormatBase.java | 133 +-
.../hbase/mapreduce/TableRecordReaderImpl.java | 11 +-
.../replication/VerifyReplication.java | 30 +-
.../hadoop/hbase/master/AssignmentManager.java | 13 +-
.../hadoop/hbase/master/CatalogJanitor.java | 27 +-
.../org/apache/hadoop/hbase/master/HMaster.java | 399 +-
.../hadoop/hbase/master/HMasterCommandLine.java | 52 +-
.../hbase/master/MasterCoprocessorHost.java | 12 +-
.../hadoop/hbase/master/MasterFileSystem.java | 10 +-
.../hadoop/hbase/master/MasterRpcServices.java | 95 +-
.../hadoop/hbase/master/MasterServices.java | 15 +-
.../hadoop/hbase/master/RegionStateStore.java | 4 +-
.../hadoop/hbase/master/RegionStates.java | 20 +-
.../hadoop/hbase/master/ServerManager.java | 112 +-
.../hadoop/hbase/master/SplitLogManager.java | 2 +-
.../hadoop/hbase/master/TableLockManager.java | 6 +-
.../hbase/master/TableNamespaceManager.java | 36 +-
.../hadoop/hbase/master/TableStateManager.java | 1 -
.../master/balancer/LoadBalancerFactory.java | 11 +-
.../master/handler/CreateTableHandler.java | 7 +-
.../master/handler/DeleteTableHandler.java | 18 +-
.../master/handler/DisableTableHandler.java | 11 +-
.../master/handler/EnableTableHandler.java | 6 +-
.../master/handler/ModifyTableHandler.java | 9 +-
.../handler/TableDeleteFamilyHandler.java | 6 +-
.../procedure/AddColumnFamilyProcedure.java | 407 +
.../master/procedure/CreateTableProcedure.java | 442 +
.../procedure/DeleteColumnFamilyProcedure.java | 439 +
.../master/procedure/DeleteTableProcedure.java | 450 +
.../master/procedure/DisableTableProcedure.java | 540 +
.../master/procedure/EnableTableProcedure.java | 582 +
.../procedure/MasterDDLOperationHelper.java | 167 +
.../procedure/MasterProcedureConstants.java | 31 +
.../master/procedure/MasterProcedureEnv.java | 123 +
.../master/procedure/MasterProcedureQueue.java | 448 +
.../master/procedure/MasterProcedureUtil.java | 56 +
.../procedure/ModifyColumnFamilyProcedure.java | 382 +
.../master/procedure/ModifyTableProcedure.java | 510 +
.../master/procedure/ProcedurePrepareLatch.java | 105 +
.../master/procedure/ProcedureSyncWait.java | 179 +
.../procedure/TableProcedureInterface.java | 48 +
.../procedure/TruncateTableProcedure.java | 291 +
.../hbase/master/snapshot/SnapshotManager.java | 4 +-
.../hadoop/hbase/mob/DefaultMobCompactor.java | 16 +-
.../hbase/mob/DefaultMobStoreFlusher.java | 18 +-
.../PartitionedMobFileCompactor.java | 31 +-
.../hbase/mob/mapreduce/MemStoreWrapper.java | 12 +-
.../hbase/mob/mapreduce/SweepReducer.java | 13 +-
.../hbase/namespace/NamespaceAuditor.java | 15 -
.../hbase/namespace/NamespaceStateManager.java | 49 +-
.../namespace/NamespaceTableAndRegionInfo.java | 2 +-
.../hbase/procedure/ZKProcedureMemberRpcs.java | 2 +-
.../procedure/flush/FlushTableSubprocedure.java | 15 +-
.../RegionServerFlushTableProcedureManager.java | 6 +-
.../hadoop/hbase/quotas/MasterQuotaManager.java | 15 +-
.../hbase/quotas/RegionServerQuotaManager.java | 15 +-
.../hbase/quotas/RegionStateListener.java | 2 +-
.../hbase/quotas/RegionStateListener.java~HEAD | 54 -
.../quotas/RegionStateListener.java~HEAD_0 | 54 -
.../quotas/RegionStateListener.java~jon_master | 54 -
.../quotas/RegionStateListener.java~master | 54 -
.../hadoop/hbase/quotas/TimeBasedLimiter.java | 9 +-
.../AnnotationReadingPriorityFunction.java | 5 +-
.../hbase/regionserver/CompactSplitThread.java | 60 +-
.../hbase/regionserver/CompactionRequestor.java | 14 +-
.../ConstantSizeRegionSplitPolicy.java | 8 +-
.../hbase/regionserver/DefaultMemStore.java | 16 +-
.../hbase/regionserver/DefaultStoreFlusher.java | 2 +-
.../regionserver/ExplicitColumnTracker.java | 17 +-
.../regionserver/FavoredNodesForRegion.java | 7 +-
.../regionserver/FlushRequestListener.java | 2 +-
.../hbase/regionserver/FlushRequester.java | 8 +-
.../hadoop/hbase/regionserver/HRegion.java | 2359 ++-
.../hbase/regionserver/HRegionFileSystem.java | 10 +-
.../hbase/regionserver/HRegionServer.java | 370 +-
.../hadoop/hbase/regionserver/HStore.java | 144 +-
.../hbase/regionserver/HeapMemoryManager.java | 2 +-
...IncreasingToUpperBoundRegionSplitPolicy.java | 4 +-
.../hbase/regionserver/InternalScanner.java | 9 +-
.../hadoop/hbase/regionserver/KeyValueHeap.java | 49 +-
.../hbase/regionserver/KeyValueScanner.java | 6 +
.../hbase/regionserver/LastSequenceId.java | 10 +-
.../hadoop/hbase/regionserver/LogRoller.java | 2 +-
.../hadoop/hbase/regionserver/MemStore.java | 6 +
.../hbase/regionserver/MemStoreFlusher.java | 170 +-
.../hbase/regionserver/MetricsRegionServer.java | 4 +
.../MetricsRegionServerWrapperImpl.java | 42 +-
.../hbase/regionserver/MobStoreScanner.java | 4 +-
.../regionserver/NoLimitScannerContext.java | 102 +
.../regionserver/NonLazyKeyValueScanner.java | 4 +
.../hbase/regionserver/OnlineRegions.java | 27 +-
.../hbase/regionserver/RSRpcServices.java | 324 +-
.../hadoop/hbase/regionserver/Region.java | 678 +
.../regionserver/RegionCoprocessorHost.java | 29 +-
.../hbase/regionserver/RegionMergeRequest.java | 13 +-
.../regionserver/RegionMergeTransaction.java | 644 +-
.../RegionMergeTransactionFactory.java | 76 +
.../RegionMergeTransactionImpl.java | 702 +
.../hbase/regionserver/RegionScanner.java | 44 +-
.../RegionServerCoprocessorHost.java | 15 +-
.../regionserver/RegionServerServices.java | 13 +-
.../hbase/regionserver/RegionSplitPolicy.java | 6 +-
.../hbase/regionserver/ReplicationService.java | 8 +-
.../regionserver/ReversedMobStoreScanner.java | 4 +-
.../hbase/regionserver/ScanQueryMatcher.java | 44 +-
.../hbase/regionserver/ScannerContext.java | 527 +
.../hadoop/hbase/regionserver/SplitRequest.java | 20 +-
.../hbase/regionserver/SplitTransaction.java | 758 +-
.../regionserver/SplitTransactionFactory.java | 74 +
.../regionserver/SplitTransactionImpl.java | 789 +
.../apache/hadoop/hbase/regionserver/Store.java | 38 +-
.../hadoop/hbase/regionserver/StoreFile.java | 7 +-
.../hbase/regionserver/StoreFileScanner.java | 8 +-
.../hbase/regionserver/StoreFlushContext.java | 16 +
.../hadoop/hbase/regionserver/StoreFlusher.java | 8 +-
.../hadoop/hbase/regionserver/StoreScanner.java | 102 +-
.../regionserver/StorefileRefresherChore.java | 10 +-
.../regionserver/compactions/Compactor.java | 10 +-
.../handler/CloseRegionHandler.java | 4 +-
.../handler/FinishRegionRecoveringHandler.java | 56 +
.../handler/RegionReplicaFlushHandler.java | 187 +
.../snapshot/FlushSnapshotSubprocedure.java | 16 +-
.../snapshot/RegionServerSnapshotManager.java | 12 +-
.../hbase/regionserver/wal/FSWALEntry.java | 30 +-
.../regionserver/wal/ProtobufLogReader.java | 15 +-
.../hbase/regionserver/wal/ReplayHLogKey.java | 53 +
.../wal/SecureProtobufLogReader.java | 8 +-
.../wal/SecureProtobufLogWriter.java | 5 +-
.../hadoop/hbase/regionserver/wal/WALEdit.java | 13 +-
.../hbase/replication/BaseWALEntryFilter.java | 29 +
.../hbase/replication/ReplicationEndpoint.java | 9 +-
.../HBaseInterClusterReplicationEndpoint.java | 8 +-
.../replication/regionserver/MetricsSink.java | 17 +
.../replication/regionserver/MetricsSource.java | 36 +-
.../RegionReplicaReplicationEndpoint.java | 349 +-
.../replication/regionserver/Replication.java | 32 +-
.../regionserver/ReplicationLoad.java | 151 +
.../regionserver/ReplicationSink.java | 8 +
.../regionserver/ReplicationSource.java | 13 +-
.../regionserver/ReplicationSourceManager.java | 9 +-
.../security/access/AccessControlLists.java | 7 +-
.../hbase/security/access/AccessController.java | 462 +-
.../hbase/security/access/AuthResult.java | 81 +-
.../security/access/SecureBulkLoadEndpoint.java | 14 +-
.../hbase/security/access/TableAuthManager.java | 35 +-
.../security/access/ZKPermissionWatcher.java | 17 +
.../hbase/security/token/TokenProvider.java | 5 +-
.../DefaultVisibilityLabelServiceImpl.java | 9 +-
.../visibility/VisibilityController.java | 127 +-
.../security/visibility/VisibilityUtils.java | 10 +-
.../hadoop/hbase/snapshot/SnapshotManifest.java | 2 +-
.../org/apache/hadoop/hbase/tool/Canary.java | 440 +-
.../hadoop/hbase/tool/WriteSinkCoprocessor.java | 2 +-
.../hadoop/hbase/util/CompressionTest.java | 9 +-
.../hadoop/hbase/util/ConnectionCache.java | 34 +-
.../apache/hadoop/hbase/util/FSHDFSUtils.java | 14 +-
.../org/apache/hadoop/hbase/util/FSUtils.java | 55 +-
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 232 +-
.../org/apache/hadoop/hbase/util/HMerge.java | 33 +-
.../org/apache/hadoop/hbase/util/IdLock.java | 16 +
.../org/apache/hadoop/hbase/util/Merge.java | 4 +-
.../hadoop/hbase/util/ModifyRegionUtils.java | 24 +
.../hadoop/hbase/util/MultiHConnection.java | 8 +-
.../hadoop/hbase/util/RegionSizeCalculator.java | 5 +
.../hbase/util/ServerRegionReplicaUtil.java | 62 +-
.../hbase/util/hbck/OfflineMetaRepair.java | 2 +-
.../hbase/util/hbck/TableLockChecker.java | 6 +-
.../hadoop/hbase/wal/WALPrettyPrinter.java | 27 +-
.../apache/hadoop/hbase/wal/WALSplitter.java | 62 +-
.../hbase/zookeeper/MiniZooKeeperCluster.java | 113 +-
.../zookeeper/RecoveringRegionWatcher.java | 11 +-
.../hadoop/hbase/zookeeper/ZKSplitLog.java | 17 +-
.../hbase/zookeeper/ZooKeeperMainServer.java | 30 +-
.../hadoop/hbase/replication/package.html | 140 +-
.../resources/hbase-webapps/master/snapshot.jsp | 1 -
.../resources/hbase-webapps/master/table.jsp | 98 +-
.../org/apache/hadoop/hbase/HBaseTestCase.java | 12 +-
.../hadoop/hbase/HBaseTestingUtility.java | 197 +-
.../hbase/HFilePerformanceEvaluation.java | 4 +-
.../apache/hadoop/hbase/MiniHBaseCluster.java | 30 +-
.../hadoop/hbase/MockRegionServerServices.java | 17 +-
.../hadoop/hbase/PerformanceEvaluation.java | 193 +-
.../hadoop/hbase/ScanPerformanceEvaluation.java | 7 +-
.../ServerResourceCheckerJUnitListener.java | 15 -
.../apache/hadoop/hbase/TestAcidGuarantees.java | 2 -
.../hadoop/hbase/TestGlobalMemStoreSize.java | 16 +-
.../hadoop/hbase/TestHBaseTestingUtility.java | 115 +-
.../org/apache/hadoop/hbase/TestIOFencing.java | 8 +-
.../apache/hadoop/hbase/TestInfoServers.java | 68 +-
.../hadoop/hbase/TestMetaTableAccessor.java | 52 +-
.../hbase/TestMetaTableAccessorNoCluster.java | 4 +-
.../hadoop/hbase/TestMovedRegionsCleaner.java | 96 +
.../hbase/TestPartialResultsFromClientSide.java | 831 +
.../org/apache/hadoop/hbase/TestZooKeeper.java | 115 +-
.../hadoop/hbase/backup/TestHFileArchiving.java | 31 +-
.../TestZooKeeperTableArchiveClient.java | 16 +-
.../hbase/client/HConnectionTestingUtility.java | 70 +-
.../apache/hadoop/hbase/client/TestAdmin2.java | 52 +-
.../hadoop/hbase/client/TestCheckAndMutate.java | 14 +
.../hadoop/hbase/client/TestClientPushback.java | 11 +-
.../hadoop/hbase/client/TestFromClientSide.java | 249 +-
.../org/apache/hadoop/hbase/client/TestHCM.java | 306 +-
.../client/TestHTableMultiplexerFlushCache.java | 35 +-
.../hadoop/hbase/client/TestMetaScanner.java | 243 -
.../hbase/client/TestMetaWithReplicas.java | 33 +-
.../client/TestMobCloneSnapshotFromClient.java | 3 +-
.../TestMobRestoreSnapshotFromClient.java | 6 +-
.../TestMobSnapshotCloneIndependence.java | 17 +-
.../hbase/client/TestMobSnapshotFromClient.java | 4 +-
.../hadoop/hbase/client/TestMultiParallel.java | 2 +-
.../hadoop/hbase/client/TestReplicasClient.java | 90 +-
.../apache/hadoop/hbase/client/TestResult.java | 27 +
.../hbase/client/TestResultSizeEstimation.java | 127 +
.../client/TestScannersFromClientSide.java | 83 +
.../hadoop/hbase/client/TestSizeFailures.java | 176 +
.../TestReplicationAdminWithClusters.java | 163 +
.../coprocessor/ColumnAggregationEndpoint.java | 1 -
.../ColumnAggregationEndpointNullResponse.java | 8 +-
.../ColumnAggregationEndpointWithErrors.java | 6 +-
.../hbase/coprocessor/SimpleRegionObserver.java | 10 +-
.../hbase/coprocessor/TestClassLoading.java | 33 +-
.../TestCoprocessorConfiguration.java | 172 +
.../coprocessor/TestCoprocessorEndpoint.java | 6 +-
.../coprocessor/TestCoprocessorInterface.java | 77 +-
.../TestCoprocessorTableEndpoint.java | 182 +
.../hbase/coprocessor/TestHTableWrapper.java | 34 +-
.../hbase/coprocessor/TestMasterObserver.java | 112 +-
.../TestRegionObserverInterface.java | 37 +-
.../TestRegionObserverScannerOpenHook.java | 17 +-
.../coprocessor/TestRegionServerObserver.java | 30 +-
.../hbase/filter/TestColumnPrefixFilter.java | 19 +-
.../hbase/filter/TestDependentColumnFilter.java | 17 +-
.../apache/hadoop/hbase/filter/TestFilter.java | 29 +-
.../filter/TestInvocationRecordFilter.java | 11 +-
.../filter/TestMultipleColumnPrefixFilter.java | 25 +-
.../hadoop/hbase/fs/TestBlockReorder.java | 11 +-
.../hadoop/hbase/http/ssl/KeyStoreTestUtil.java | 69 +-
.../hbase/io/TestByteBufferOutputStream.java | 49 +
.../apache/hadoop/hbase/io/TestFileLink.java | 36 +
.../hbase/io/encoding/TestEncodedSeekers.java | 13 +-
.../hbase/io/encoding/TestPrefixTree.java | 33 +-
.../hadoop/hbase/io/hfile/CacheTestUtils.java | 6 +-
.../hadoop/hbase/io/hfile/TestCacheConfig.java | 4 +-
.../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 174 +-
.../hbase/io/hfile/TestFixedFileTrailer.java | 18 +-
.../io/hfile/TestForceCacheImportantBlocks.java | 10 +-
.../apache/hadoop/hbase/io/hfile/TestHFile.java | 4 +-
.../hbase/io/hfile/TestHFileBlockIndex.java | 2 +-
.../hbase/io/hfile/TestHFileEncryption.java | 4 +-
.../TestHFileInlineToRootChunkConversion.java | 7 +-
.../hadoop/hbase/io/hfile/TestHFileSeek.java | 2 +-
.../hbase/io/hfile/TestHFileWriterV2.java | 9 +-
.../hbase/io/hfile/TestHFileWriterV3.java | 9 +-
.../hfile/TestLazyDataBlockDecompression.java | 5 +-
.../hadoop/hbase/io/hfile/TestPrefetch.java | 6 +-
.../hadoop/hbase/io/hfile/TestReseekTo.java | 2 +-
.../TestScannerSelectionUsingKeyRange.java | 10 +-
.../io/hfile/TestScannerSelectionUsingTTL.java | 14 +-
.../hadoop/hbase/io/hfile/TestSeekTo.java | 11 +-
.../hbase/io/hfile/bucket/TestBucketCache.java | 87 +-
.../hadoop/hbase/ipc/AbstractTestIPC.java | 269 +
.../apache/hadoop/hbase/ipc/TestAsyncIPC.java | 298 +
.../apache/hadoop/hbase/ipc/TestCallRunner.java | 2 +-
.../hbase/ipc/TestGlobalEventLoopGroup.java | 54 +
.../org/apache/hadoop/hbase/ipc/TestIPC.java | 584 +-
.../apache/hadoop/hbase/ipc/TestRpcMetrics.java | 2 +
.../hbase/mapred/TestTableInputFormat.java | 267 +-
.../hbase/mapreduce/MapreduceTestingShim.java | 27 +-
.../hbase/mapreduce/TestHFileOutputFormat.java | 9 +-
.../hbase/mapreduce/TestHFileOutputFormat2.java | 9 +-
.../hbase/mapreduce/TestImportExport.java | 2 -
.../TestImportTSVWithOperationAttributes.java | 9 +-
.../hbase/mapreduce/TestImportTSVWithTTLs.java | 5 +-
.../mapreduce/TestLoadIncrementalHFiles.java | 85 +-
.../hbase/mapreduce/TestTableInputFormat.java | 487 +
.../hadoop/hbase/master/MockRegionServer.java | 20 +-
.../hbase/master/TestAssignmentListener.java | 8 +-
.../hadoop/hbase/master/TestCatalogJanitor.java | 24 +-
.../hbase/master/TestClockSkewDetection.java | 31 +-
.../master/TestDistributedLogSplitting.java | 7 +-
.../master/TestGetLastFlushedSequenceId.java | 25 +-
.../hbase/master/TestHMasterRPCException.java | 113 +-
.../hadoop/hbase/master/TestMasterFailover.java | 5 +-
.../hbase/master/TestRegionPlacement.java | 27 +-
.../hadoop/hbase/master/TestRestartCluster.java | 9 +-
.../hbase/master/TestTableLockManager.java | 33 +-
.../hbase/master/TestTableStateManager.java | 82 +
.../hadoop/hbase/master/TestWarmupRegion.java | 164 +
.../master/handler/TestEnableTableHandler.java | 160 +-
.../handler/TestTableDeleteFamilyHandler.java | 122 +-
.../TestTableDescriptorModification.java | 124 +-
.../MasterProcedureTestingUtility.java | 408 +
.../procedure/TestAddColumnFamilyProcedure.java | 246 +
.../procedure/TestCreateTableProcedure.java | 257 +
.../TestDeleteColumnFamilyProcedure.java | 302 +
.../procedure/TestDeleteTableProcedure.java | 208 +
.../procedure/TestDisableTableProcedure.java | 182 +
.../procedure/TestEnableTableProcedure.java | 193 +
.../TestMasterFailoverWithProcedures.java | 429 +
.../procedure/TestMasterProcedureQueue.java | 433 +
.../TestModifyColumnFamilyProcedure.java | 238 +
.../procedure/TestModifyTableProcedure.java | 403 +
.../procedure/TestTruncateTableProcedure.java | 246 +
.../apache/hadoop/hbase/mob/MobTestUtil.java | 7 +-
.../hadoop/hbase/mob/TestCachedMobFile.java | 36 +-
.../hbase/mob/TestDefaultMobStoreFlusher.java | 43 +-
.../hbase/mob/TestExpiredMobFileCleaner.java | 20 +-
.../hbase/mob/TestMobDataBlockEncoding.java | 24 +-
.../apache/hadoop/hbase/mob/TestMobFile.java | 22 +-
.../hadoop/hbase/mob/TestMobFileCache.java | 57 +-
.../filecompactions/TestMobFileCompactor.java | 85 +-
.../TestPartitionedMobFileCompactor.java | 13 +-
.../hbase/mob/mapreduce/TestMobSweepJob.java | 3 +-
.../mob/mapreduce/TestMobSweepReducer.java | 19 +-
.../hbase/mob/mapreduce/TestMobSweeper.java | 44 +-
.../hbase/namespace/TestNamespaceAuditor.java | 337 +-
.../hadoop/hbase/quotas/TestQuotaThrottle.java | 23 +-
.../regionserver/DataBlockEncodingTool.java | 7 +-
.../MetricsRegionServerWrapperStub.java | 5 +
.../regionserver/NoOpScanPolicyObserver.java | 2 +-
.../hbase/regionserver/TestAtomicOperation.java | 58 +-
.../hbase/regionserver/TestBlocksRead.java | 36 +-
.../hbase/regionserver/TestBlocksScanned.java | 12 +-
.../hadoop/hbase/regionserver/TestBulkLoad.java | 58 +-
.../regionserver/TestCacheOnWriteInSchema.java | 3 +-
.../hbase/regionserver/TestColumnSeeking.java | 30 +-
.../hbase/regionserver/TestCompaction.java | 6 +-
.../hbase/regionserver/TestCompactionState.java | 10 +-
.../hbase/regionserver/TestDefaultMemStore.java | 13 +-
.../hbase/regionserver/TestDeleteMobTable.java | 31 +-
.../regionserver/TestEncryptionKeyRotation.java | 19 +-
.../TestEncryptionRandomKeying.java | 9 +-
.../TestEndToEndSplitTransaction.java | 228 +-
.../regionserver/TestExplicitColumnTracker.java | 64 +-
.../regionserver/TestGetClosestAtOrBefore.java | 45 +-
.../hbase/regionserver/TestHMobStore.java | 2 +-
.../hadoop/hbase/regionserver/TestHRegion.java | 340 +-
.../regionserver/TestHRegionReplayEvents.java | 1576 ++
.../regionserver/TestHeapMemoryManager.java | 4 +-
.../hbase/regionserver/TestKeepDeletes.java | 121 +-
.../hbase/regionserver/TestMajorCompaction.java | 71 +-
.../regionserver/TestMetricsRegionServer.java | 1 +
.../hbase/regionserver/TestMinVersions.java | 36 +-
.../hbase/regionserver/TestMinorCompaction.java | 12 +-
.../hbase/regionserver/TestMobCompaction.java | 25 +-
.../hbase/regionserver/TestMobStoreScanner.java | 59 +-
.../regionserver/TestMultiColumnScanner.java | 8 +-
.../regionserver/TestPerColumnFamilyFlush.java | 228 +-
.../hbase/regionserver/TestQueryMatcher.java | 21 -
.../regionserver/TestRegionFavoredNodes.java | 10 +-
.../TestRegionMergeTransaction.java | 68 +-
.../TestRegionMergeTransactionOnCluster.java | 2 +-
.../regionserver/TestRegionReplicaFailover.java | 373 +
.../hbase/regionserver/TestRegionReplicas.java | 30 +-
.../regionserver/TestRegionServerHostname.java | 108 +
.../regionserver/TestRegionServerMetrics.java | 76 +-
.../regionserver/TestRegionServerNoMaster.java | 4 +-
.../TestRegionServerOnlineConfigChange.java | 2 +-
.../TestRegionServerReportForDuty.java | 183 +
.../regionserver/TestRegionSplitPolicy.java | 39 +-
.../regionserver/TestResettingCounters.java | 10 +-
.../regionserver/TestReversibleScanners.java | 10 +-
.../hbase/regionserver/TestRowTooBig.java | 10 +-
.../regionserver/TestScanWithBloomError.java | 8 +-
.../hadoop/hbase/regionserver/TestScanner.java | 18 +-
.../regionserver/TestSeekOptimizations.java | 8 +-
.../regionserver/TestSplitTransaction.java | 62 +-
.../TestSplitTransactionOnCluster.java | 71 +-
.../hbase/regionserver/TestStoreFileInfo.java | 28 +-
.../TestStoreFileRefresherChore.java | 23 +-
.../hbase/regionserver/TestStoreScanner.java | 2 +-
.../hbase/regionserver/TestStripeCompactor.java | 26 +-
.../hbase/regionserver/TestWideScanner.java | 4 +-
.../TestCompactionWithThroughputController.java | 7 +-
.../compactions/TestStripeCompactionPolicy.java | 9 +-
.../hbase/regionserver/wal/TestDurability.java | 23 +
.../hbase/regionserver/wal/TestFSHLog.java | 3 +-
.../hbase/regionserver/wal/TestLogRolling.java | 18 +-
.../hbase/regionserver/wal/TestWALReplay.java | 58 +-
.../replication/TestMasterReplication.java | 4 +-
.../replication/TestMultiSlaveReplication.java | 2 +-
.../replication/TestReplicationSmallTests.java | 45 +
.../TestRegionReplicaReplicationEndpoint.java | 134 +-
...egionReplicaReplicationEndpointNoMaster.java | 67 +-
.../hbase/security/HBaseKerberosUtils.java | 36 +-
.../hadoop/hbase/security/TestSecureRPC.java | 103 +-
.../TestUsersOperationsWithSecureHadoop.java | 62 +-
.../hbase/security/access/SecureTestUtil.java | 294 +-
.../access/TestAccessControlFilter.java | 2 +-
.../security/access/TestAccessController.java | 542 +-
.../security/access/TestAccessController2.java | 270 +-
.../access/TestCellACLWithMultipleVersions.java | 35 +-
.../hbase/security/access/TestCellACLs.java | 115 +-
.../security/access/TestNamespaceCommands.java | 161 +-
.../access/TestScanEarlyTermination.java | 3 +-
.../access/TestWithDisabledAuthorization.java | 1079 ++
.../token/TestGenerateDelegationToken.java | 173 +
.../security/token/TestTokenAuthentication.java | 5 +-
.../ExpAsStringVisibilityLabelServiceImpl.java | 10 +-
.../TestDefaultScanLabelGeneratorStack.java | 27 +-
.../TestEnforcingScanLabelGenerator.java | 22 +-
...sibilityLabelReplicationWithExpAsString.java | 30 +-
.../visibility/TestVisibilityLabels.java | 164 +-
.../TestVisibilityLabelsReplication.java | 36 +-
.../visibility/TestVisibilityLabelsWithACL.java | 41 +-
...ibilityLabelsWithDefaultVisLabelService.java | 11 +-
.../TestVisibilityLabelsWithDeletes.java | 306 +-
.../TestVisibilityLabelsWithSLGStack.java | 8 +-
.../TestVisibilityLablesWithGroups.java | 36 +-
.../TestVisibilityWithCheckAuths.java | 49 +-
.../TestWithDisabledAuthorization.java | 237 +
.../hbase/snapshot/MobSnapshotTestingUtils.java | 16 +-
.../hbase/snapshot/SnapshotTestingUtils.java | 6 +-
.../TestMobFlushSnapshotFromClient.java | 13 +-
.../TestMobRestoreFlushSnapshotFromClient.java | 5 +-
.../snapshot/TestMobRestoreSnapshotHelper.java | 4 -
.../snapshot/TestSnapshotClientRetries.java | 125 +
.../hadoop/hbase/util/ConstantDelayQueue.java | 196 +
.../util/LoadTestDataGeneratorWithMOB.java | 2 +-
.../apache/hadoop/hbase/util/LoadTestTool.java | 87 +-
.../hadoop/hbase/util/MultiThreadedAction.java | 4 +-
.../hbase/util/MultiThreadedWriterBase.java | 7 +-
.../hadoop/hbase/util/RestartMetaTest.java | 6 +-
.../hadoop/hbase/util/TestEncryptionTest.java | 6 +-
.../apache/hadoop/hbase/util/TestFSUtils.java | 19 +-
.../apache/hadoop/hbase/util/TestHBaseFsck.java | 194 +-
.../hbase/util/TestHBaseFsckEncryption.java | 14 +-
.../hadoop/hbase/util/TestMergeTable.java | 5 +-
.../apache/hadoop/hbase/util/TestMergeTool.java | 4 +-
.../apache/hadoop/hbase/util/TestTableName.java | 15 +-
.../hadoop/hbase/util/hbck/HbckTestingUtil.java | 37 +-
.../util/hbck/TestOfflineMetaRebuildBase.java | 1 -
.../hadoop/hbase/wal/TestWALFiltering.java | 6 +-
.../hadoop/hbase/zookeeper/TestHQuorumPeer.java | 9 +
.../zookeeper/TestRecoverableZooKeeper.java | 3 +-
.../zookeeper/TestZooKeeperMainServer.java | 10 +
.../src/test/resources/log4j.properties | 2 +-
hbase-shell/pom.xml | 62 +-
hbase-shell/src/main/asciidoc/.gitignore | 0
hbase-shell/src/main/ruby/hbase.rb | 5 +
hbase-shell/src/main/ruby/hbase/admin.rb | 67 +-
hbase-shell/src/main/ruby/hbase/hbase.rb | 6 +-
hbase-shell/src/main/ruby/hbase/quotas.rb | 10 +-
.../src/main/ruby/hbase/replication_admin.rb | 82 +-
hbase-shell/src/main/ruby/hbase/security.rb | 26 +-
hbase-shell/src/main/ruby/hbase/table.rb | 44 +-
.../src/main/ruby/hbase/visibility_labels.rb | 13 +-
hbase-shell/src/main/ruby/shell.rb | 4 +
hbase-shell/src/main/ruby/shell/commands.rb | 18 +-
.../src/main/ruby/shell/commands/add_peer.rb | 38 +-
.../ruby/shell/commands/balancer_enabled.rb | 41 +
.../src/main/ruby/shell/commands/clear_auths.rb | 5 +-
.../main/ruby/shell/commands/clone_snapshot.rb | 7 +
.../shell/commands/disable_table_replication.rb | 42 +
.../shell/commands/enable_table_replication.rb | 42 +
.../src/main/ruby/shell/commands/get_auths.rb | 5 +-
.../src/main/ruby/shell/commands/get_counter.rb | 10 +-
.../src/main/ruby/shell/commands/get_splits.rb | 46 +
.../src/main/ruby/shell/commands/incr.rb | 6 +-
.../src/main/ruby/shell/commands/set_auths.rb | 5 +-
.../src/main/ruby/shell/commands/status.rb | 9 +-
.../hadoop/hbase/client/AbstractTestShell.java | 69 +
.../hbase/client/TestReplicationShell.java | 38 +
.../apache/hadoop/hbase/client/TestShell.java | 52 +-
hbase-shell/src/test/ruby/hbase/admin_test.rb | 12 +
.../test/ruby/hbase/replication_admin_test.rb | 191 +
hbase-shell/src/test/ruby/hbase/table_test.rb | 17 +
.../ruby/hbase/visibility_labels_admin_test.rb | 2 +
hbase-shell/src/test/ruby/test_helper.rb | 20 +
hbase-shell/src/test/ruby/tests_runner.rb | 18 +
hbase-testing-util/src/main/asciidoc/.gitignore | 0
hbase-thrift/pom.xml | 77 +-
hbase-thrift/src/main/asciidoc/.gitignore | 0
.../hadoop/hbase/thrift/ThriftHttpServlet.java | 50 +-
.../hadoop/hbase/thrift/ThriftServerRunner.java | 228 +-
.../apache/hadoop/hbase/thrift2/HTablePool.java | 696 -
.../thrift2/ThriftHBaseServiceHandler.java | 68 +-
.../apache/hadoop/hbase/thrift2/hbase.thrift | 32 +-
.../hadoop/hbase/thrift2/TestHTablePool.java | 366 -
pom.xml | 294 +-
.../asciidoc/_chapters/appendix_acl_matrix.adoc | 134 +-
src/main/asciidoc/_chapters/architecture.adoc | 50 +-
src/main/asciidoc/_chapters/configuration.adoc | 7 +-
src/main/asciidoc/_chapters/developer.adoc | 2 +-
src/main/asciidoc/_chapters/hbase-default.adoc | 6 +-
src/main/asciidoc/_chapters/hbase_apis.adoc | 7 +
src/main/asciidoc/_chapters/images | 1 +
src/main/asciidoc/_chapters/ops_mgt.adoc | 95 +-
src/main/asciidoc/_chapters/performance.adoc | 11 +
src/main/asciidoc/_chapters/preface.adoc | 5 +
src/main/asciidoc/_chapters/rpc.adoc | 2 +-
src/main/asciidoc/_chapters/security.adoc | 103 +-
src/main/asciidoc/asciidoctor.css | 399 +
src/main/asciidoc/book.adoc | 18 +-
src/main/asciidoc/images | 1 +
.../resources/images/hbase_logo_with_orca.png | Bin 0 -> 11618 bytes
.../resources/images/hbase_logo_with_orca.xcf | Bin 0 -> 84265 bytes
.../images/jumping-orca_transparent_rotated.xcf | Bin 0 -> 135399 bytes
.../resources/images/region_split_process.png | Bin 0 -> 338255 bytes
src/main/site/site.xml | 37 +-
src/main/site/xdoc/index.xml | 12 +-
832 files changed, 95288 insertions(+), 31304 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --cc hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e507245,efbc7d2..fe5a5f1
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@@ -3821,101 -4088,234 +4088,336 @@@ public class HBaseAdmin implements Admi
}
/**
+ * {@inheritDoc}
+ */
+ @Override
+ public void compactMob(final TableName tableName, final byte[] columnFamily)
+ throws IOException, InterruptedException {
+ checkTableNameNotNull(tableName);
+ checkFamilyNameNotNull(columnFamily);
+ validateMobColumnFamily(tableName, columnFamily);
+ compactMob(tableName, columnFamily, false);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void compactMob(final TableName tableName) throws IOException, InterruptedException {
+ checkTableNameNotNull(tableName);
+ compactMob(tableName, null, false);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void majorCompactMob(final TableName tableName, final byte[] columnFamily)
+ throws IOException, InterruptedException {
+ checkTableNameNotNull(tableName);
+ checkFamilyNameNotNull(columnFamily);
+ validateMobColumnFamily(tableName, columnFamily);
+ compactMob(tableName, columnFamily, true);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void majorCompactMob(final TableName tableName) throws IOException, InterruptedException {
+ checkTableNameNotNull(tableName);
+ compactMob(tableName, null, true);
+ }
+
++ /**
++ * {@inheritDoc}
++ */
+ @Override
+ public CompactionState getMobCompactionState(TableName tableName) throws IOException {
+ checkTableNameNotNull(tableName);
+ try {
+ ServerName master = getClusterStatus().getMaster();
+ HRegionInfo info = new HRegionInfo(tableName, Bytes.toBytes(".mob"),
+ HConstants.EMPTY_END_ROW, false, 0);
+ GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
+ info.getRegionName(), true);
+ GetRegionInfoResponse response = this.connection.getAdmin(master)
+ .getRegionInfo(null, request);
+ return response.getCompactionState();
+ } catch (ServiceException se) {
+ throw ProtobufUtil.getRemoteException(se);
+ }
+ }
+
+ /**
+ * Compacts the mob files in a mob-enabled column family. Asynchronous operation.
+ * @param tableName The table to compact.
+ * @param columnFamily The column family to compact. If it is null, all the mob-enabled
+ * column families in this table will be compacted.
+ * @param major Whether to select all the mob files in the compaction.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void compactMob(final TableName tableName, final byte[] columnFamily, boolean major)
+ throws IOException, InterruptedException {
+ // Get the mob region info; this is a dummy region.
+ HRegionInfo info = new HRegionInfo(tableName, Bytes.toBytes(".mob"), HConstants.EMPTY_END_ROW,
+ false, 0);
+ ServerName master = getClusterStatus().getMaster();
+ compact(master, info, major, columnFamily);
+ }
+
+ private void checkTableNameNotNull(TableName tableName) {
+ if (tableName == null) {
+ throw new IllegalArgumentException("TableName cannot be null");
+ }
+ }
+
+ private void checkFamilyNameNotNull(byte[] columnFamily) {
+ if (columnFamily == null) {
+ throw new IllegalArgumentException("The column family name cannot be null");
+ }
+ }
+
+ private void validateMobColumnFamily(TableName tableName, byte[] columnFamily)
+ throws IOException {
+ HTableDescriptor htd = getTableDescriptor(tableName);
+ HColumnDescriptor family = htd.getFamily(columnFamily);
+ if (family == null || !family.isMobEnabled()) {
+ throw new IllegalArgumentException("Column family " + columnFamily
- + " is not a mob column family");
++ + " is not a mob column family");
++ }
++ }
++
++ /**
+ * Future that waits on a procedure result.
+ * Returned by the async version of the Admin calls,
+ * and used internally by the sync calls to wait on the result of the procedure.
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ protected static class ProcedureFuture<V> implements Future<V> {
+ private ExecutionException exception = null;
+ private boolean procResultFound = false;
+ private boolean done = false;
+ private V result = null;
+
+ private final HBaseAdmin admin;
+ private final Long procId;
+
+ public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
+ this.admin = admin;
+ this.procId = procId;
+ }
+
+ @Override
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean isCancelled() {
+ // TODO: Abort not implemented yet
+ return false;
+ }
+
+ @Override
+ public V get() throws InterruptedException, ExecutionException {
+ // TODO: should we ever spin forever?
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public V get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (!done) {
+ long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
+ try {
+ try {
+ // if the master supports procedures, try to wait for the result
+ if (procId != null) {
+ result = waitProcedureResult(procId, deadlineTs);
+ }
+ // if we don't have a proc result, try the compatibility wait
+ if (!procResultFound) {
+ result = waitOperationResult(deadlineTs);
+ }
+ result = postOperationResult(result, deadlineTs);
+ done = true;
+ } catch (IOException e) {
+ result = postOperationFailure(e, deadlineTs);
+ done = true;
+ }
+ } catch (IOException e) {
+ exception = new ExecutionException(e);
+ done = true;
+ }
+ }
+ if (exception != null) {
+ throw exception;
+ }
+ return result;
+ }
+
+ @Override
+ public boolean isDone() {
+ return done;
+ }
+
+ protected HBaseAdmin getAdmin() {
+ return admin;
+ }
+
+ private V waitProcedureResult(long procId, long deadlineTs)
+ throws IOException, TimeoutException, InterruptedException {
+ GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
+ .setProcId(procId)
+ .build();
+
+ int tries = 0;
+ IOException serviceEx = null;
+ while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
+ GetProcedureResultResponse response = null;
+ try {
+ // Try to fetch the result
+ response = getProcedureResult(request);
+ } catch (IOException e) {
+ serviceEx = unwrapException(e);
+
+ // the master may be down
+ LOG.warn("failed to get the procedure result procId=" + procId, serviceEx);
+
+ // Not much to do, if we have a DoNotRetryIOException
+ if (serviceEx instanceof DoNotRetryIOException) {
+ // TODO: looks like there is no way to unwrap this exception and get the proper
+ // UnsupportedOperationException aside from looking at the message.
+ // anyway, if we fail here we just fail over to the compatibility side
+ // and that is always a valid solution.
+ LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
+ procResultFound = false;
+ return null;
+ }
+ }
+
+ // If the procedure is no longer running, we should have a result
+ if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
+ procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
+ return convertResult(response);
+ }
+
+ try {
+ Thread.sleep(getAdmin().getPauseTime(tries++));
+ } catch (InterruptedException e) {
+ throw new InterruptedException(
+ "Interrupted while waiting for the result of proc " + procId);
+ }
+ }
+ if (serviceEx != null) {
+ throw serviceEx;
+ } else {
+ throw new TimeoutException("The procedure " + procId + " is still running");
+ }
+ }
+
+ private static IOException unwrapException(IOException e) {
+ if (e instanceof RemoteException) {
+ return ((RemoteException)e).unwrapRemoteException();
+ }
+ return e;
+ }
+
+ protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
+ throws IOException {
+ return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
+ admin.getConnection()) {
+ @Override
+ public GetProcedureResultResponse call(int callTimeout) throws ServiceException {
+ return master.getProcedureResult(null, request);
+ }
+ });
+ }
+
+ /**
+ * Convert the procedure result response to a specified type.
+ * @param response the procedure result object to parse
+ * @return the result data of the procedure.
+ */
+ protected V convertResult(final GetProcedureResultResponse response) throws IOException {
+ if (response.hasException()) {
+ throw ForeignExceptionUtil.toIOException(response.getException());
+ }
+ return null;
+ }
+
+ /**
+ * Fallback implementation in case the procedure is not supported by the server.
+ * It should try to wait until the operation is completed.
+ * @param deadlineTs the timestamp after which this method should throw a TimeoutException
+ * @return the result data of the operation
+ */
+ protected V waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ return null;
+ }
+
+ /**
+ * Called after the operation is completed and the result fetched.
+ * This allows extra steps to be performed after the procedure completes,
+ * and transformations to be applied to the result that will be returned by get().
+ * @param result the result of the procedure
+ * @param deadlineTs the timestamp after which this method should throw a TimeoutException
+ * @return the result of the procedure, which may be the same as the passed one
+ */
+ protected V postOperationResult(final V result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ return result;
+ }
+
+ /**
+ * Called after the operation is terminated with a failure.
+ * This allows extra steps to be performed after the procedure terminates,
+ * and transformations to be applied to the result that will be returned by get().
+ * The default implementation rethrows the exception.
+ * @param exception the exception got from fetching the result
+ * @param deadlineTs the timestamp after which this method should throw a TimeoutException
+ * @return the result of the procedure, which may be the same as the passed one
+ */
+ protected V postOperationFailure(final IOException exception, final long deadlineTs)
+ throws IOException, TimeoutException {
+ throw exception;
+ }
+
+ protected interface WaitForStateCallable {
+ boolean checkState(int tries) throws IOException;
+ void throwInterruptedException() throws InterruptedIOException;
+ void throwTimeoutException(long elapsed) throws TimeoutException;
+ }
+
+ protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
+ throws IOException, TimeoutException {
+ int tries = 0;
+ IOException serverEx = null;
+ long startTime = EnvironmentEdgeManager.currentTime();
+ while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
+ serverEx = null;
+ try {
+ if (callable.checkState(tries)) {
+ return;
+ }
+ } catch (IOException e) {
+ serverEx = e;
+ }
+ try {
+ Thread.sleep(getAdmin().getPauseTime(tries++));
+ } catch (InterruptedException e) {
+ callable.throwInterruptedException();
+ }
+ }
+ if (serverEx != null) {
+ throw unwrapException(serverEx);
+ } else {
+ callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
+ }
}
}
}
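Both waitProcedureResult() and waitForState() in the hunk above share one retry-with-deadline shape: probe, remember the last server-side IOException, sleep an increasing pause, and on expiry either rethrow the remembered error or time out. A standalone sketch of that loop under stated assumptions (plain Java; a capped exponential backoff stands in for HBaseAdmin.getPauseTime(), whose schedule is not shown in this diff):

import java.io.IOException;
import java.util.concurrent.TimeoutException;

public final class DeadlinePoller {
  public interface Check {
    // Returns true once the desired state is reached.
    boolean ok(int tries) throws IOException;
  }

  public static void poll(long deadlineTs, long basePauseMs, Check check)
      throws IOException, TimeoutException, InterruptedException {
    int tries = 0;
    IOException lastError = null;
    while (System.currentTimeMillis() < deadlineTs) {
      lastError = null;
      try {
        if (check.ok(tries)) {
          return; // state reached before the deadline
        }
      } catch (IOException e) {
        lastError = e; // remember it; the server may still recover
      }
      // Capped exponential backoff between probes.
      Thread.sleep(basePauseMs * (1L << Math.min(tries++, 6)));
    }
    if (lastError != null) {
      throw lastError;
    }
    throw new TimeoutException("condition not reached before deadline");
  }
}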
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --cc hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 7bb9de1,8b5b2d7..3b71435
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@@ -39,8 -37,6 +39,7 @@@ import java.util.Map.Entry
import java.util.NavigableSet;
import java.util.concurrent.TimeUnit;
+import com.google.protobuf.*;
- import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
@@@ -2990,85 -3024,39 +3019,120 @@@ public final class ProtobufUtil
return desc.build();
}
+
-
+ /**
+ * This version of protobuf's mergeDelimitedFrom avoids the hard-coded 64MB limit for decoding
+ * buffers
+ * @param builder current message builder
+ * @param in InputStream with delimited protobuf data
+ * @throws IOException
+ */
+ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) throws IOException {
+ // This used to be builder.mergeDelimitedFrom(in);
+ // but is replaced to allow us to bump the protobuf size limit.
+ final int firstByte = in.read();
+ if (firstByte == -1) {
+ // Stream is already at EOF: bail out. (protobuf's original returned false here.)
+ } else {
+ final int size = CodedInputStream.readRawVarint32(firstByte, in);
+ final InputStream limitedInput = new LimitedInputStream(in, size);
+ final CodedInputStream codedInput = CodedInputStream.newInstance(limitedInput);
+ codedInput.setSizeLimit(size);
+ builder.mergeFrom(codedInput);
+ codedInput.checkLastTagWas(0);
+ }
+ }
+
+ /**
+ * This is cut and pasted from protobuf's package-private AbstractMessageLite.
+ *
+ * An InputStream implementation which reads from some other InputStream
+ * but is limited to a particular number of bytes. Used by
+ * mergeDelimitedFrom(). This is intentionally package-private so that
+ * UnknownFieldSet can share it.
+ */
+ static final class LimitedInputStream extends FilterInputStream {
+ private int limit;
+
+ LimitedInputStream(InputStream in, int limit) {
+ super(in);
+ this.limit = limit;
+ }
+
+ @Override
+ public int available() throws IOException {
+ return Math.min(super.available(), limit);
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (limit <= 0) {
+ return -1;
+ }
+ final int result = super.read();
+ if (result >= 0) {
+ --limit;
+ }
+ return result;
+ }
+
+ @Override
+ public int read(final byte[] b, final int off, int len)
+ throws IOException {
+ if (limit <= 0) {
+ return -1;
+ }
+ len = Math.min(len, limit);
+ final int result = super.read(b, off, len);
+ if (result >= 0) {
+ limit -= result;
+ }
+ return result;
+ }
+
+ @Override
+ public long skip(final long n) throws IOException {
+ final long result = super.skip(Math.min(n, limit));
+ if (result >= 0) {
+ limit -= result;
+ }
+ return result;
+ }
+ }
++
+ public static ReplicationLoadSink toReplicationLoadSink(
+ ClusterStatusProtos.ReplicationLoadSink cls) {
+ return new ReplicationLoadSink(cls.getAgeOfLastAppliedOp(), cls.getTimeStampsOfLastAppliedOp());
+ }
+
+ public static ReplicationLoadSource toReplicationLoadSource(
+ ClusterStatusProtos.ReplicationLoadSource cls) {
+ return new ReplicationLoadSource(cls.getPeerID(), cls.getAgeOfLastShippedOp(),
+ cls.getSizeOfLogQueue(), cls.getTimeStampOfLastShippedOp(), cls.getReplicationLag());
+ }
+
+ public static List<ReplicationLoadSource> toReplicationLoadSourceList(
+ List<ClusterStatusProtos.ReplicationLoadSource> clsList) {
+ ArrayList<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>();
+ for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) {
+ rlsList.add(toReplicationLoadSource(cls));
+ }
+ return rlsList;
+ }
+
+ /**
+ * Get a protocol buffer VersionInfo
+ *
+ * @return the converted protocol buffer VersionInfo
+ */
+ public static RPCProtos.VersionInfo getVersionInfo() {
+ RPCProtos.VersionInfo.Builder builder = RPCProtos.VersionInfo.newBuilder();
+ builder.setVersion(VersionInfo.getVersion());
+ builder.setUrl(VersionInfo.getUrl());
+ builder.setRevision(VersionInfo.getRevision());
+ builder.setUser(VersionInfo.getUser());
+ builder.setDate(VersionInfo.getDate());
+ builder.setSrcChecksum(VersionInfo.getSrcChecksum());
+ return builder.build();
+ }
}
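A hedged usage sketch for the mergeDelimitedFrom helper added above: the helper and ClientProtos.Result are real, but the file name and the choice of message type are illustrative assumptions.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

class MergeDelimitedDemo {
  public static void main(String[] args) throws IOException {
    // "result.pb" is a hypothetical file holding one length-delimited message.
    try (InputStream in = new FileInputStream("result.pb")) {
      ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder();
      // Unlike builder.mergeDelimitedFrom(in), the helper sets the size limit
      // to the actual delimited length, so messages over protobuf's default
      // 64MB cap can still be decoded.
      ProtobufUtil.mergeDelimitedFrom(builder, in);
      System.out.println("cells: " + builder.build().getCellCount());
    }
  }
}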
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
----------------------------------------------------------------------
diff --cc hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index 7f3838f,7728112..678cc7b
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@@ -36,16 -34,8 +36,16 @@@ public class PrettyPrinter
StringBuilder human = new StringBuilder();
switch (unit) {
case TIME_INTERVAL:
- human.append(humanReadableTTL(Long.valueOf(value)));
+ human.append(humanReadableTTL(Long.parseLong(value)));
break;
+ case LONG:
+ byte[] longBytes = Bytes.toBytesBinary(value);
+ human.append(String.valueOf(Bytes.toLong(longBytes)));
+ break;
+ case BOOLEAN:
+ byte[] booleanBytes = Bytes.toBytesBinary(value);
+ human.append(String.valueOf(Bytes.toBoolean(booleanBytes)));
+ break;
default:
human.append(value);
}
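A small sketch of what the new LONG branch undoes, assuming only org.apache.hadoop.hbase.util.Bytes: a descriptor attribute stores a long as binary-escaped text, and the round-trip below mirrors how that becomes a human-readable number.

import org.apache.hadoop.hbase.util.Bytes;

class LongAttributePrinting {
  public static void main(String[] args) {
    // Encode a long the way a binary-escaped attribute value is stored...
    String value = Bytes.toStringBinary(Bytes.toBytes(42L));
    // ...then reverse it exactly as the LONG case above does for display.
    byte[] longBytes = Bytes.toBytesBinary(value);
    System.out.println(Bytes.toLong(longBytes)); // prints 42
  }
}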
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
----------------------------------------------------------------------
diff --cc hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 2aad115,9a66da0..b609b4a
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@@ -253,77 -258,6 +258,76 @@@ public interface MetricsRegionServerWra
long getMajorCompactedCellsSize();
/**
- <<<<<<< HEAD
+ * Gets the number of cells moved to mob during compaction.
+ */
+ long getMobCompactedIntoMobCellsCount();
+
+ /**
+ * Gets the number of cells moved from mob during compaction.
+ */
+ long getMobCompactedFromMobCellsCount();
+
+ /**
+ * Gets the total size of cells moved to mob during compaction, in bytes.
+ */
+ long getMobCompactedIntoMobCellsSize();
+
+ /**
+ * Gets the total size of cells moved from mob during compaction, in bytes.
+ */
+ long getMobCompactedFromMobCellsSize();
+
+ /**
+ * Gets the number of flushes in mob-enabled stores.
+ */
+ long getMobFlushCount();
+
+ /**
+ * Gets the number of mob cells flushed to disk.
+ */
+ long getMobFlushedCellsCount();
+
+ /**
+ * Gets the total amount of mob cells flushed to disk, in bytes.
+ */
+ long getMobFlushedCellsSize();
+
+ /**
+ * Gets the number of scanned mob cells.
+ */
+ long getMobScanCellsCount();
+
+ /**
+ * Gets the total amount of scanned mob cells, in bytes.
+ */
+ long getMobScanCellsSize();
+
+ /**
+ * Gets the count of accesses to the mob file cache.
+ */
+ long getMobFileCacheAccessCount();
+
+ /**
+ * Gets the count of misses to the mob file cache.
+ */
+ long getMobFileCacheMissCount();
+
+ /**
+ * Gets the number of items evicted from the mob file cache.
+ */
+ long getMobFileCacheEvictedCount();
+
+ /**
+ * Gets the count of cached mob files.
+ */
+ long getMobFileCacheCount();
+
+ /**
+ * Gets the hit percent of the mob file cache.
+ */
+ int getMobFileCacheHitPercent();
+
+ /**
* @return Count of hedged read operations
*/
public long getHedgedReadOps();
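The interface above only declares getters; as one hedged illustration of the arithmetic an implementation might use behind getMobFileCacheHitPercent(), derived from the access and miss counts (the AtomicLong bookkeeping below is an assumption, not HBase's actual implementation):

import java.util.concurrent.atomic.AtomicLong;

class MobFileCacheStats {
  private final AtomicLong accessCount = new AtomicLong();
  private final AtomicLong missCount = new AtomicLong();

  void recordAccess(boolean hit) {
    accessCount.incrementAndGet();
    if (!hit) {
      missCount.incrementAndGet();
    }
  }

  // Mirrors getMobFileCacheHitPercent(): hits over accesses, as an int percent.
  int hitPercent() {
    long accesses = accessCount.get();
    if (accesses == 0) {
      return 0;
    }
    return (int) (100 * (accesses - missCount.get()) / accesses);
  }
}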
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
----------------------------------------------------------------------
diff --cc hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
index 85c01cc,0000000..82a599c
mode 100644,000000..100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
@@@ -1,155 -1,0 +1,156 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hbase.client.HBaseAdmin;
++import org.apache.hadoop.hbase.client.Admin;
++import org.apache.hadoop.hbase.client.Connection;
++import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.LoadTestDataGeneratorWithMOB;
+import org.apache.hadoop.hbase.util.LoadTestTool;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Integration Test for MOB ingest.
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestIngestWithMOB extends IntegrationTestIngest {
+ private static final char COLON = ':';
+
- private byte[] mobColumnFamily = LoadTestTool.COLUMN_FAMILY;
++ private byte[] mobColumnFamily = LoadTestTool.DEFAULT_COLUMN_FAMILY;
+ public static final String THRESHOLD = "threshold";
+ public static final String MIN_MOB_DATA_SIZE = "minMobDataSize";
+ public static final String MAX_MOB_DATA_SIZE = "maxMobDataSize";
+ private int threshold = 1024; // 1KB
+ private int minMobDataSize = 512; // 512B
+ private int maxMobDataSize = threshold * 5; // 5KB
+ private static final long JUNIT_RUN_TIME = 2 * 60 * 1000; // 2 minutes
+
+ // Similar to LOAD_TEST_TOOL_INIT_ARGS, except OPT_IN_MEMORY is removed.
+ protected String[] LOAD_TEST_TOOL_MOB_INIT_ARGS = {
+ LoadTestTool.OPT_COMPRESSION,
+ LoadTestTool.OPT_DATA_BLOCK_ENCODING,
+ LoadTestTool.OPT_ENCRYPTION,
+ LoadTestTool.OPT_NUM_REGIONS_PER_SERVER,
+ LoadTestTool.OPT_REGION_REPLICATION,
+ };
+
+ @Override
+ protected String[] getArgsForLoadTestToolInitTable() {
+ List<String> args = new ArrayList<String>();
+ args.add("-tn");
+ args.add(getTablename().getNameAsString());
+ // pass all remaining args from conf with keys <test class name>.<load test tool arg>
+ String clazz = this.getClass().getSimpleName();
+ for (String arg : LOAD_TEST_TOOL_MOB_INIT_ARGS) {
+ String val = conf.get(String.format("%s.%s", clazz, arg));
+ if (val != null) {
+ args.add("-" + arg);
+ args.add(val);
+ }
+ }
+ args.add("-init_only");
+ return args.toArray(new String[args.size()]);
+ }
+
+ @Override
+ protected void addOptions() {
+ super.addOptions();
+ super.addOptWithArg(THRESHOLD, "The size threshold above which cells are classified as mob data");
+ super.addOptWithArg(MIN_MOB_DATA_SIZE, "Minimum value size for mob data");
+ super.addOptWithArg(MAX_MOB_DATA_SIZE, "Maximum value size for mob data");
+ }
+
+ @Override
+ protected void processOptions(CommandLine cmd) {
+ super.processOptions(cmd);
+ if (cmd.hasOption(THRESHOLD)) {
+ threshold = Integer.parseInt(cmd.getOptionValue(THRESHOLD));
+ }
+ if (cmd.hasOption(MIN_MOB_DATA_SIZE)) {
+ minMobDataSize = Integer.parseInt(cmd.getOptionValue(MIN_MOB_DATA_SIZE));
+ }
+ if (cmd.hasOption(MAX_MOB_DATA_SIZE)) {
+ maxMobDataSize = Integer.parseInt(cmd.getOptionValue(MAX_MOB_DATA_SIZE));
+ }
+ if (minMobDataSize > maxMobDataSize) {
+ throw new IllegalArgumentException(
+ "The minMobDataSize should not be larger than minMobDataSize");
+ }
+ }
+
+ @Test
+ public void testIngest() throws Exception {
+ runIngestTest(JUNIT_RUN_TIME, 100, 10, 1024, 10, 20);
+ }
+
+ @Override
+ protected void initTable() throws IOException {
+ super.initTable();
+
- byte[] tableName = getTablename().getName();
- HBaseAdmin admin = new HBaseAdmin(conf);
++ TableName tableName = getTablename();
++ Connection connection = ConnectionFactory.createConnection(conf);
++ Admin admin = connection.getAdmin();
+ HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
+ LOG.info("Disabling table " + getTablename());
+ admin.disableTable(tableName);
+ for (HColumnDescriptor columnDescriptor : tableDesc.getFamilies()) {
+ if (Arrays.equals(columnDescriptor.getName(), mobColumnFamily)) {
+ columnDescriptor.setMobEnabled(true);
+ columnDescriptor.setMobThreshold((long) threshold);
+ admin.modifyColumn(tableName, columnDescriptor);
+ }
+ }
+ LOG.info("Enabling table " + getTablename());
+ admin.enableTable(tableName);
+ admin.close();
++ connection.close();
+ }
+
+ @Override
+ protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey,
+ long numKeys) {
+ String[] args = super.getArgsForLoadTestTool(mode, modeSpecificArg, startKey, numKeys);
+ List<String> tmp = new ArrayList<String>(Arrays.asList(args));
+ // LoadTestDataGeneratorWithMOB:mobColumnFamily:minMobDataSize:maxMobDataSize
+ tmp.add(HIPHEN + LoadTestTool.OPT_GENERATOR);
+ StringBuilder sb = new StringBuilder(LoadTestDataGeneratorWithMOB.class.getName());
+ sb.append(COLON);
+ sb.append(Bytes.toString(mobColumnFamily));
+ sb.append(COLON);
+ sb.append(minMobDataSize);
+ sb.append(COLON);
+ sb.append(maxMobDataSize);
+ tmp.add(sb.toString());
+ return tmp.toArray(new String[tmp.size()]);
+ }
+
+ public static void main(String[] args) throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ IntegrationTestingUtility.setUseDistributedCluster(conf);
+ int ret = ToolRunner.run(conf, new IntegrationTestIngestWithMOB(), args);
+ System.exit(ret);
+ }
+}
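A hedged example of driving this test programmatically, mirroring its own main(); the option spellings follow the constants defined above, and the chosen sizes are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestIngestWithMOB;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.util.ToolRunner;

class RunMobIngest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    // Cells of 1KB and up go to mob; generated value sizes span 512B..5KB.
    String[] testArgs = {"-threshold", "1024",
        "-minMobDataSize", "512", "-maxMobDataSize", "5120"};
    System.exit(ToolRunner.run(conf, new IntegrationTestIngestWithMOB(), testArgs));
  }
}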
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fe6f06e,9bd1dbb..5a1e188
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@@ -110,8 -107,8 +108,10 @@@ import org.apache.hadoop.hbase.monitori
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
+ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+ import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@@ -127,10 -124,9 +127,10 @@@ import org.apache.hadoop.hbase.util.Byt
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.FSUtils;
- import org.apache.hadoop.hbase.util.HBaseFsckRepair;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
+import org.apache.hadoop.hbase.util.IdLock;
+ import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
@@@ -1078,13 -1077,35 +1096,41 @@@ public class HMaster extends HRegionSer
if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
}
+ private void startProcedureExecutor() throws IOException {
+ final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
+ final Path logDir = new Path(fileSystemManager.getRootDir(),
+ MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
+
+ procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
+ new MasterProcedureEnv.WALStoreLeaseRecovery(this));
+ procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
+ procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
+ procEnv.getProcedureQueue());
+
+ final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
+ Math.max(Runtime.getRuntime().availableProcessors(),
+ MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
+ procedureStore.start(numThreads);
+ procedureExecutor.start(numThreads);
+ }
+
+ private void stopProcedureExecutor() {
+ if (procedureExecutor != null) {
+ procedureExecutor.stop();
+ }
+
+ if (procedureStore != null) {
+ procedureStore.stop(isAborted());
+ }
+ }
+
private void stopChores() {
+ if (this.expiredMobFileCleanerChore != null) {
+ this.expiredMobFileCleanerChore.cancel(true);
+ }
+ if (this.mobFileCompactChore != null) {
+ this.mobFileCompactChore.cancel(true);
+ }
if (this.balancerChore != null) {
this.balancerChore.cancel(true);
}
@@@ -2333,56 -2408,23 +2436,76 @@@
}
/**
+ * Gets the mob file compaction state for a specific table.
+ * Whether all the mob files are selected is only known while the compaction runs, but this
+ * state is reported just before the compaction starts, so the exact compaction type cannot
+ * be known at that point; a rough state is reported instead. Only two compaction states are
+ * available: CompactionState.MAJOR_AND_MINOR and CompactionState.NONE.
+ * @param tableName The current table name.
+ * @return Whether the given table is in a mob file compaction now.
+ */
+ public CompactionState getMobCompactionState(TableName tableName) {
+ AtomicInteger compactionsCount = mobFileCompactionStates.get(tableName);
+ if (compactionsCount != null && compactionsCount.get() != 0) {
+ return CompactionState.MAJOR_AND_MINOR;
+ }
+ return CompactionState.NONE;
+ }
+
+ public void reportMobFileCompactionStart(TableName tableName) throws IOException {
+ IdLock.Entry lockEntry = null;
+ try {
+ lockEntry = mobFileCompactionLock.getLockEntry(tableName.hashCode());
+ AtomicInteger compactionsCount = mobFileCompactionStates.get(tableName);
+ if (compactionsCount == null) {
+ compactionsCount = new AtomicInteger(0);
+ mobFileCompactionStates.put(tableName, compactionsCount);
+ }
+ compactionsCount.incrementAndGet();
+ } finally {
+ if (lockEntry != null) {
+ mobFileCompactionLock.releaseLockEntry(lockEntry);
+ }
+ }
+ }
+
+ public void reportMobFileCompactionEnd(TableName tableName) throws IOException {
+ IdLock.Entry lockEntry = null;
+ try {
+ lockEntry = mobFileCompactionLock.getLockEntry(tableName.hashCode());
+ AtomicInteger compactionsCount = mobFileCompactionStates.get(tableName);
+ if (compactionsCount != null) {
+ int count = compactionsCount.decrementAndGet();
+ // remove the entry if the count is 0.
+ if (count == 0) {
+ mobFileCompactionStates.remove(tableName);
+ }
+ }
+ } finally {
+ if (lockEntry != null) {
+ mobFileCompactionLock.releaseLockEntry(lockEntry);
+ }
+ }
++ }
++
++ /**
+ * Queries the state of the {@link LoadBalancerTracker}. If the balancer is not initialized,
+ * false is returned.
+ *
+ * @return The state of the load balancer, or false if the load balancer isn't defined.
+ */
+ public boolean isBalancerOn() {
+ if (null == loadBalancerTracker) return false;
+ return loadBalancerTracker.isBalancerOn();
+ }
+ /**
+ * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
+ *
+ * @return The name of the {@link LoadBalancer} in use.
+ */
+ public String getLoadBalancerClassName() {
+ return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, LoadBalancerFactory
+ .getDefaultLoadBalancerClass().getName());
}
}
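One hedged sketch of the caller-side contract for the accounting methods above (a mob compaction chore, say); the helper and its Runnable parameter are hypothetical, and the try/finally keeps the per-table counter balanced even when the compaction fails.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.HMaster;

class MobCompactionAccounting {
  static void runMobCompaction(HMaster master, TableName tableName,
      Runnable compaction) throws IOException {
    master.reportMobFileCompactionStart(tableName);
    try {
      compaction.run();
    } finally {
      // Once the per-table counter drops back to zero the entry is removed,
      // and getMobCompactionState(tableName) reports CompactionState.NONE.
      master.reportMobFileCompactionEnd(tableName);
    }
  }
}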
[14/50] [abbrv] hbase git commit: HBASE-13204 Procedure v2 - client
create/delete table sync
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index dce0737..cc6f201 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -9074,6 +9074,16 @@ public final class MasterProtos {
public interface CreateTableResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // optional uint64 proc_id = 1;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ long getProcId();
}
/**
* Protobuf type {@code CreateTableResponse}
@@ -9108,6 +9118,7 @@ public final class MasterProtos {
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
@@ -9125,6 +9136,11 @@ public final class MasterProtos {
}
break;
}
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -9164,7 +9180,25 @@ public final class MasterProtos {
return PARSER;
}
+ private int bitField0_;
+ // optional uint64 proc_id = 1;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
private void initFields() {
+ procId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -9178,6 +9212,9 @@ public final class MasterProtos {
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -9187,6 +9224,10 @@ public final class MasterProtos {
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -9210,6 +9251,11 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse) obj;
boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -9223,6 +9269,10 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -9332,6 +9382,8 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@@ -9358,6 +9410,13 @@ public final class MasterProtos {
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.procId_ = procId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -9373,6 +9432,9 @@ public final class MasterProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()) return this;
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -9398,6 +9460,40 @@ public final class MasterProtos {
}
return this;
}
+ private int bitField0_;
+
+ // optional uint64 proc_id = 1;
+ private long procId_ ;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:CreateTableResponse)
}
@@ -9973,6 +10069,16 @@ public final class MasterProtos {
public interface DeleteTableResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // optional uint64 proc_id = 1;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ long getProcId();
}
/**
* Protobuf type {@code DeleteTableResponse}
@@ -10007,6 +10113,7 @@ public final class MasterProtos {
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
@@ -10024,6 +10131,11 @@ public final class MasterProtos {
}
break;
}
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -10063,7 +10175,25 @@ public final class MasterProtos {
return PARSER;
}
+ private int bitField0_;
+ // optional uint64 proc_id = 1;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
private void initFields() {
+ procId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -10077,6 +10207,9 @@ public final class MasterProtos {
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -10086,6 +10219,10 @@ public final class MasterProtos {
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -10109,6 +10246,11 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse) obj;
boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -10122,6 +10264,10 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -10231,6 +10377,8 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@@ -10257,6 +10405,13 @@ public final class MasterProtos {
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.procId_ = procId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -10272,6 +10427,9 @@ public final class MasterProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()) return this;
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -10297,6 +10455,40 @@ public final class MasterProtos {
}
return this;
}
+ private int bitField0_;
+
+ // optional uint64 proc_id = 1;
+ private long procId_ ;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:DeleteTableResponse)
}
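Since proc_id is optional in both CreateTableResponse and DeleteTableResponse, a client must guard against masters that predate procedure v2; a hedged sketch, where extractProcId is a hypothetical helper:

import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;

class ProcIdSupport {
  // Returns the procedure id, or null when an older master never set the
  // optional field, letting the caller fall back to pre-procedure behavior.
  static Long extractProcId(CreateTableResponse response) {
    return response.hasProcId() ? response.getProcId() : null;
  }
}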
@@ -42809,21 +43001,1161 @@ public final class MasterProtos {
break;
}
case 8: {
- bitField0_ |= 0x00000001;
- done_ = input.readBool();
+ bitField0_ |= 0x00000001;
+ done_ = input.readBool();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = snapshot_.toBuilder();
+ }
+ snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(snapshot_);
+ snapshot_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<IsProcedureDoneResponse> PARSER =
+ new com.google.protobuf.AbstractParser<IsProcedureDoneResponse>() {
+ public IsProcedureDoneResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new IsProcedureDoneResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<IsProcedureDoneResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional bool done = 1 [default = false];
+ public static final int DONE_FIELD_NUMBER = 1;
+ private boolean done_;
+ /**
+ * <code>optional bool done = 1 [default = false];</code>
+ */
+ public boolean hasDone() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bool done = 1 [default = false];</code>
+ */
+ public boolean getDone() {
+ return done_;
+ }
+
+ // optional .ProcedureDescription snapshot = 2;
+ public static final int SNAPSHOT_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_;
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() {
+ return snapshot_;
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() {
+ return snapshot_;
+ }
+
+ private void initFields() {
+ done_ = false;
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (hasSnapshot()) {
+ if (!getSnapshot().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, done_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, snapshot_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, done_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, snapshot_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) obj;
+
+ boolean result = true;
+ result = result && (hasDone() == other.hasDone());
+ if (hasDone()) {
+ result = result && (getDone()
+ == other.getDone());
+ }
+ result = result && (hasSnapshot() == other.hasSnapshot());
+ if (hasSnapshot()) {
+ result = result && getSnapshot()
+ .equals(other.getSnapshot());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasDone()) {
+ hash = (37 * hash) + DONE_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getDone());
+ }
+ if (hasSnapshot()) {
+ hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshot().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code IsProcedureDoneResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSnapshotFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ done_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.done_ = done_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (snapshotBuilder_ == null) {
+ result.snapshot_ = snapshot_;
+ } else {
+ result.snapshot_ = snapshotBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()) return this;
+ if (other.hasDone()) {
+ setDone(other.getDone());
+ }
+ if (other.hasSnapshot()) {
+ mergeSnapshot(other.getSnapshot());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (hasSnapshot()) {
+ if (!getSnapshot().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional bool done = 1 [default = false];
+ private boolean done_ ;
+ /**
+ * <code>optional bool done = 1 [default = false];</code>
+ */
+ public boolean hasDone() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional bool done = 1 [default = false];</code>
+ */
+ public boolean getDone() {
+ return done_;
+ }
+ /**
+ * <code>optional bool done = 1 [default = false];</code>
+ */
+ public Builder setDone(boolean value) {
+ bitField0_ |= 0x00000001;
+ done_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bool done = 1 [default = false];</code>
+ */
+ public Builder clearDone() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ done_ = false;
+ onChanged();
+ return this;
+ }
+
+ // optional .ProcedureDescription snapshot = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder> snapshotBuilder_;
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() {
+ if (snapshotBuilder_ == null) {
+ return snapshot_;
+ } else {
+ return snapshotBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ snapshot_ = value;
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public Builder setSnapshot(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder builderForValue) {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = builderForValue.build();
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance()) {
+ snapshot_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
+ } else {
+ snapshot_ = value;
+ }
+ onChanged();
+ } else {
+ snapshotBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public Builder clearSnapshot() {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance();
+ onChanged();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder getSnapshotBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getSnapshotFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() {
+ if (snapshotBuilder_ != null) {
+ return snapshotBuilder_.getMessageOrBuilder();
+ } else {
+ return snapshot_;
+ }
+ }
+ /**
+ * <code>optional .ProcedureDescription snapshot = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>
+ getSnapshotFieldBuilder() {
+ if (snapshotBuilder_ == null) {
+ snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder>(
+ snapshot_,
+ getParentForChildren(),
+ isClean());
+ snapshot_ = null;
+ }
+ return snapshotBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:IsProcedureDoneResponse)
+ }
+
+ static {
+ defaultInstance = new IsProcedureDoneResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:IsProcedureDoneResponse)
+ }
+
+ public interface GetProcedureResultRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 proc_id = 1;
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ long getProcId();
+ }
+ /**
+ * Protobuf type {@code GetProcedureResultRequest}
+ */
+ public static final class GetProcedureResultRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements GetProcedureResultRequestOrBuilder {
+ // Use GetProcedureResultRequest.newBuilder() to construct.
+ private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GetProcedureResultRequest defaultInstance;
+ public static GetProcedureResultRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetProcedureResultRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GetProcedureResultRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<GetProcedureResultRequest> PARSER =
+ new com.google.protobuf.AbstractParser<GetProcedureResultRequest>() {
+ public GetProcedureResultRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new GetProcedureResultRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<GetProcedureResultRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required uint64 proc_id = 1;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
+ private void initFields() {
+ procId_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasProcId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj;
+
+ boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code GetProcedureResultRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.procId_ = procId_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this;
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasProcId()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required uint64 proc_id = 1;
+ private long procId_ ;
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required uint64 proc_id = 1;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GetProcedureResultRequest)
+ }
+
+ static {
+ defaultInstance = new GetProcedureResultRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetProcedureResultRequest)
+ }
+
+ public interface GetProcedureResultResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .GetProcedureResultResponse.State state = 1;
+ /**
+ * <code>required .GetProcedureResultResponse.State state = 1;</code>
+ */
+ boolean hasState();
+ /**
+ * <code>required .GetProcedureResultResponse.State state = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState();
+
+ // optional uint64 start_time = 2;
+ /**
+ * <code>optional uint64 start_time = 2;</code>
+ */
+ boolean hasStartTime();
+ /**
+ * <code>optional uint64 start_time = 2;</code>
+ */
+ long getStartTime();
+
+ // optional uint64 last_update = 3;
+ /**
+ * <code>optional uint64 last_update = 3;</code>
+ */
+ boolean hasLastUpdate();
+ /**
+ * <code>optional uint64 last_update = 3;</code>
+ */
+ long getLastUpdate();
+
+ // optional bytes result = 4;
+ /**
+ * <code>optional bytes result = 4;</code>
+ */
+ boolean hasResult();
+ /**
+ * <code>optional bytes result = 4;</code>
+ */
+ com.google.protobuf.ByteString getResult();
+
+ // optional .ForeignExceptionMessage exception = 5;
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 5;</code>
+ */
+ boolean hasException();
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 5;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException();
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 5;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder();
+ }
+ /**
+ * Protobuf type {@code GetProcedureResultResponse}
+ */
+ public static final class GetProcedureResultResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements GetProcedureResultResponseOrBuilder {
+ // Use GetProcedureResultResponse.newBuilder() to construct.
+ private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final GetProcedureResultResponse defaultInstance;
+ public static GetProcedureResultResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetProcedureResultResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private GetProcedureResultResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(1, rawValue);
+ } else {
+ bitField0_ |= 0x00000001;
+ state_ = value;
+ }
break;
}
- case 18: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.Builder subBuilder = null;
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- subBuilder = snapshot_.toBuilder();
+ case 16: {
+ bitField0_ |= 0x00000002;
+ startTime_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ lastUpdate_ = input.readUInt64();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ result_ = input.readBytes();
+ break;
+ }
+ case 42: {
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ subBuilder = exception_.toBuilder();
}
- snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.PARSER, extensionRegistry);
+ exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry);
if (subBuilder != null) {
- subBuilder.mergeFrom(snapshot_);
- snapshot_ = subBuilder.buildPartial();
+ subBuilder.mergeFrom(exception_);
+ exception_ = subBuilder.buildPartial();
}
- bitField0_ |= 0x00000002;
+ bitField0_ |= 0x00000010;
break;
}
}
@@ -42840,84 +44172,224 @@ public final class MasterProtos {
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class);
}
- public static com.google.protobuf.Parser<IsProcedureDoneResponse> PARSER =
- new com.google.protobuf.AbstractParser<IsProcedureDoneResponse>() {
- public IsProcedureDoneResponse parsePartialFrom(
+ public static com.google.protobuf.Parser<GetProcedureResultResponse> PARSER =
+ new com.google.protobuf.AbstractParser<GetProcedureResultResponse>() {
+ public GetProcedureResultResponse parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return new IsProcedureDoneResponse(input, extensionRegistry);
+ return new GetProcedureResultResponse(input, extensionRegistry);
}
};
@java.lang.Override
- public com.google.protobuf.Parser<IsProcedureDoneResponse> getParserForType() {
+ public com.google.protobuf.Parser<GetProcedureResultResponse> getParserForType() {
return PARSER;
}
+ /**
+ * Protobuf enum {@code GetProcedureResultResponse.State}
+ */
+ public enum State
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>NOT_FOUND = 0;</code>
+ */
+ NOT_FOUND(0, 0),
+ /**
+ * <code>RUNNING = 1;</code>
+ */
+ RUNNING(1, 1),
+ /**
+ * <code>FINISHED = 2;</code>
+ */
+ FINISHED(2, 2),
+ ;
+
+ /**
+ * <code>NOT_FOUND = 0;</code>
+ */
+ public static final int NOT_FOUND_VALUE = 0;
+ /**
+ * <code>RUNNING = 1;</code>
+ */
+ public static final int RUNNING_VALUE = 1;
+ /**
+ * <code>FINISHED = 2;</code>
+ */
+ public static final int FINISHED_VALUE = 2;
+
+
+ public final int getNumber() { return value; }
+
+ public static State valueOf(int value) {
+ switch (value) {
+ case 0: return NOT_FOUND;
+ case 1: return RUNNING;
+ case 2: return FINISHED;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<State>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<State>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<State>() {
+ public State findValueByNumber(int number) {
+ return State.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final State[] VALUES = values();
+
+ public static State valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private State(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:GetProcedureResultResponse.State)
+ }
+
private int bitField0_;
- // optional bool done = 1 [default = false];
- public static final int DONE_FIELD_NUMBER = 1;
- private boolean done_;
+ // required .GetProcedureResultResponse.State state = 1;
+ public static final int STATE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_;
/**
- * <code>optional bool done = 1 [default = false];</code>
+ * <code>required .GetProcedureResultResponse.State state = 1;</code>
*/
- public boolean hasDone() {
+ public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>optional bool done = 1 [default = false];</code>
+ * <code>required .GetProcedureResultResponse.State state = 1;</code>
*/
- public boolean getDone() {
- return done_;
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() {
+ return state_;
}
- // optional .ProcedureDescription snapshot = 2;
- public static final int SNAPSHOT_FIELD_NUMBER = 2;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription snapshot_;
+ // optional uint64 start_time = 2;
+ public static final int START_TIME_FIELD_NUMBER = 2;
+ private long startTime_;
/**
- * <code>optional .ProcedureDescription snapshot = 2;</code>
+ * <code>optional uint64 start_time = 2;</code>
*/
- public boolean hasSnapshot() {
+ public boolean hasStartTime() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * <code>optional .ProcedureDescription snapshot = 2;</code>
+ * <code>optional uint64 start_time = 2;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription getSnapshot() {
- return snapshot_;
+ public long getStartTime() {
+ return startTime_;
}
+
+ // optional uint64 last_update = 3;
+ public static final int LAST_UPDATE_FIELD_NUMBER = 3;
+ private long lastUpdate_;
/**
- * <code>optional .ProcedureDescription snapshot = 2;</code>
+ * <code>optional uint64 last_update = 3;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescriptionOrBuilder getSnapshotOrBuilder() {
- return snapshot_;
+ public boolean hasLastUpdate() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional uint64 last_update = 3;</code>
+ */
+ public long getLastUpdate() {
+ return lastUpdate_;
+ }
+
+ // optional bytes result = 4;
+ public static final int RESULT_FIELD_NUMBER = 4;
+ private com.google.protobuf.ByteString result_;
+ /**
+ * <code>optional bytes result = 4;</code>
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional bytes result = 4;</code>
+ */
+ public com.google.protobuf.ByteString getResult() {
+ return result_;
+ }
+
+ // optional .ForeignExceptionMessage exception = 5;
+ public static final int EXCEPTION_FIELD_NUMBER = 5;
+ private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_;
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 5;</code>
+ */
+ public boolean hasException() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 5;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() {
+ return exception_;
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 5;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() {
+ return exception_;
}
private void initFields() {
- done_ = false;
- snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance();
+ state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND;
+ startTime_ = 0L;
+ lastUpdate_ = 0L;
+ result_ = com.google.protobuf.ByteString.EMPTY;
+ exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (hasSnapshot()) {
- if (!getSnapshot().isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
+ if (!hasState()) {
+ memoizedIsInitialized = 0;
+ return false;
}
memoizedIsInitialized = 1;
return true;
@@ -42927,10 +44399,19 @@ public final class MasterProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBool(1, done_);
+ output.writeEnum(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeMessage(2, snapshot_);
+ output.writeUInt64(2, startTime_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, lastUpdate_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, result_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeMessage(5, exception_);
}
getUnknownFields().writeTo(output);
}
@@ -42943,11 +44424,23 @@ public final class MasterProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeBoolSize(1, done_);
+ .computeEnumSize(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, snapshot_);
+ .computeUInt64Size(2, startTime_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, lastUpdate_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, result_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, exception_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -42966,21 +44459,36 @@ public final class MasterProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)) {
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) {
return super.equals(obj);
}
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) obj;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj;
boolean result = true;
- result = result && (hasDone() == other.hasDone());
- if (hasDone()) {
- result = result && (getDone()
- == other.getDone());
- }
- result = result && (hasSnapshot() == other.hasSnapshot());
- if (hasSnapshot()) {
- result = result && getSnapshot()
- .equals(other.getSnapshot());
+ result = result && (hasState() == other.hasState());
+ if (hasState()) {
+ result = result &&
+ (getState() == other.getState());
+ }
+ result = result && (hasStartTime() == other.hasStartTime());
+ if (hasStartTime()) {
+ result = result && (getStartTime()
+ == other.getStartTime());
+ }
+ result = result && (hasLastUpdate() == other.hasLastUpdate());
+ if (hasLastUpdate()) {
+ result = result && (getLastUpdate()
+ == other.getLastUpdate());
+ }
+ result = result && (hasResult() == other.hasResult());
+ if (hasResult()) {
+ result = result && getResult()
+ .equals(other.getResult());
+ }
+ result = result && (hasException() == other.hasException());
+ if (hasException()) {
+ result = result && getException()
+ .equals(other.getException());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
@@ -42995,66 +44503,78 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasDone()) {
- hash = (37 * hash) + DONE_FIELD_NUMBER;
- hash = (53 * hash) + hashBoolean(getDone());
+ if (hasState()) {
+ hash = (37 * hash) + STATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getState());
}
- if (hasSnapshot()) {
- hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
- hash = (53 * hash) + getSnapshot().hashCode();
+ if (hasStartTime()) {
+ hash = (37 * hash) + START_TIME_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getStartTime());
+ }
+ if (hasLastUpdate()) {
+ hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLastUpdate());
+ }
+ if (hasResult()) {
+ hash = (37 * hash) + RESULT_FIELD_NUMBER;
+ hash = (53 * hash) + getResult().hashCode();
+ }
+ if (hasException()) {
+ hash = (37 * hash) + EXCEPTION_FIELD_NUMBER;
+ hash = (53 * hash) + getException().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(byte[] data)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseDelimitedFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -43063,7 +44583,7 @@ public final class MasterProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -43075,24 +44595,24 @@ public final class MasterProtos {
return builder;
}
/**
- * Protobuf type {@code IsProcedureDoneResponse}
+ * Protobuf type {@code GetProcedureResultResponse}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponseOrBuilder {
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class);
}
- // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.newBuilder()
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -43104,7 +44624,7 @@ public final class MasterProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getSnapshotFieldBuilder();
+ getExceptionFieldBuilder();
}
}
private static Builder create() {
@@ -43113,14 +44633,20 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
- done_ = false;
+ state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND;
bitField0_ = (bitField0_ & ~0x00000001);
- if (snapshotBuilder_ == null) {
- snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription.getDefaultInstance();
+ startTime_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ lastUpdate_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ result_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ if (exceptionBuilder_ == null) {
+ exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance();
} else {
- snapshotBuilder_.clear();
+ exceptionBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000002);
+ bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@@ -43130,36 +44656,48 @@ public final class MasterProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsProcedureDoneResponse_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse build() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = buildPartial();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse(this);
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
- result.done_ = done_;
+ result.state_ = state_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- if (snapshotBuilder_ == null) {
- result.snapshot_ = snapshot_;
+ result.startTime_ = startTime_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.lastUpdate_ = lastUpdate_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.result_ = result_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ if (exceptionBuilder_ == null) {
+ result.exception_ = exception_;
} else {
- result.snapshot_ = snapshotBuilder_.build();
+ result.exception_ = exceptionBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
@@ -43167,32 +44705,39 @@ public final class MasterProtos {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse)other);
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse other) {
- if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()) return this;
- if (other.hasDone()) {
- setDone(other.getDone());
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this;
+ if (other.hasState()) {
+ setState(other.getState());
}
- if (other.hasSnapshot()) {
- mergeSnapshot(other.getSnapshot());
+ if (other.hasStartTime()) {
+ setStartTime(other.getStartTime());
+ }
+ if (other.hasLastUpdate()) {
+ setLastUpdate(other.getLastUpdate());
+ }
+ if (other.hasResult()) {
+ setResult(other.getResult());
+ }
+ if (other.hasException()) {
+ mergeException(other.getException());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
- if (hasSnapshot()) {
- if (!getSnapshot().isInitialized()) {
-
- return false;
- }
+ if (!hasState()) {
+
+ return false;
}
return true;
}
@@ -43201,11 +44746,11 @@ public final class MasterProtos {
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse parsedMessage = null;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) e.getUnfinishedMessage();
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.G
<TRUNCATED>
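For orientation, the generated request/response classes above follow the standard protobuf-java pattern: a builder, required-field validation in build(), and the parseFrom() overloads. A minimal usage sketch (illustrative only, not part of the commit; the proc id 42 is arbitrary):

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;

    public class ProcedureResultSketch {
      public static void main(String[] args) throws Exception {
        // proc_id is a required field, so build() would throw if it were unset
        GetProcedureResultRequest req =
            GetProcedureResultRequest.newBuilder().setProcId(42L).build();
        // round-trip through the wire format, as the RPC layer would
        GetProcedureResultRequest decoded =
            GetProcedureResultRequest.parseFrom(req.toByteArray());
        System.out.println(decoded.getProcId()); // 42

        GetProcedureResultResponse resp = GetProcedureResultResponse.newBuilder()
            .setState(GetProcedureResultResponse.State.FINISHED)
            .build();
        if (resp.getState() == GetProcedureResultResponse.State.FINISHED
            && resp.hasException()) {
          // the optional exception field carries the remote failure, if any
        }
      }
    }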
[08/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core framework
Posted by jm...@apache.org.
HBASE-13202 Procedure v2 - core framework
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/04246c6c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/04246c6c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/04246c6c
Branch: refs/heads/hbase-11339
Commit: 04246c6c3d04ab2bdc75732d530f38c739be9740
Parents: d20c08e
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 20:44:56 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:37 2015 +0100
----------------------------------------------------------------------
.../hadoop/hbase/io/util/StreamUtils.java | 12 +-
.../hadoop/hbase/util/ForeignExceptionUtil.java | 109 +
hbase-procedure/pom.xml | 181 +
.../hbase/procedure2/OnePhaseProcedure.java | 28 +
.../hadoop/hbase/procedure2/Procedure.java | 680 ++
.../procedure2/ProcedureAbortedException.java | 42 +
.../hbase/procedure2/ProcedureException.java | 45 +
.../hbase/procedure2/ProcedureExecutor.java | 1077 +++
.../procedure2/ProcedureFairRunQueues.java | 173 +
.../hbase/procedure2/ProcedureResult.java | 95 +
.../hbase/procedure2/ProcedureRunnableSet.java | 78 +
.../procedure2/ProcedureSimpleRunQueue.java | 121 +
.../procedure2/ProcedureYieldException.java | 40 +
.../procedure2/RemoteProcedureException.java | 116 +
.../hbase/procedure2/RootProcedureState.java | 185 +
.../hbase/procedure2/SequentialProcedure.java | 81 +
.../hbase/procedure2/StateMachineProcedure.java | 166 +
.../hbase/procedure2/TwoPhaseProcedure.java | 28 +
.../hbase/procedure2/store/ProcedureStore.java | 121 +
.../procedure2/store/ProcedureStoreTracker.java | 540 ++
.../CorruptedWALProcedureStoreException.java | 43 +
.../procedure2/store/wal/ProcedureWALFile.java | 152 +
.../store/wal/ProcedureWALFormat.java | 234 +
.../store/wal/ProcedureWALFormatReader.java | 166 +
.../procedure2/store/wal/WALProcedureStore.java | 721 ++
.../hadoop/hbase/procedure2/util/ByteSlot.java | 111 +
.../hbase/procedure2/util/StringUtils.java | 80 +
.../procedure2/util/TimeoutBlockingQueue.java | 217 +
.../procedure2/ProcedureTestingUtility.java | 163 +
.../procedure2/TestProcedureExecution.java | 338 +
.../procedure2/TestProcedureFairRunQueues.java | 155 +
.../hbase/procedure2/TestProcedureRecovery.java | 488 ++
.../procedure2/TestProcedureReplayOrder.java | 226 +
.../store/TestProcedureStoreTracker.java | 168 +
.../store/wal/TestWALProcedureStore.java | 267 +
.../util/TestTimeoutBlockingQueue.java | 137 +
hbase-protocol/pom.xml | 1 +
.../protobuf/generated/ProcedureProtos.java | 7219 ++++++++++++++++++
.../src/main/protobuf/Procedure.proto | 114 +
pom.xml | 21 +-
40 files changed, 14934 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 314ed2b..0b442a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -120,7 +120,7 @@ public class StreamUtils {
/**
* Reads a varInt value stored in an array.
- *
+ *
* @param input
* Input array where the varInt is available
* @param offset
@@ -198,4 +198,14 @@ public class StreamUtils {
out.write((byte) (0xff & (v >> 8)));
out.write((byte) (0xff & v));
}
+
+ public static long readLong(InputStream in) throws IOException {
+ long result = 0;
+ for (int shift = 56; shift >= 0; shift -= 8) {
+ long x = in.read();
+ if (x < 0) throw new IOException("EOF");
+ result |= (x << shift);
+ }
+ return result;
+ }
}
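The new readLong() decodes eight big-endian bytes and fails with an IOException on a short read, which matches the encoding produced by java.io.DataOutputStream#writeLong. A quick round-trip sketch (illustrative only, not part of the commit):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hbase.io.util.StreamUtils;

    public class ReadLongSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new DataOutputStream(buf).writeLong(0x0123456789ABCDEFL); // 8 bytes, big-endian
        long v = StreamUtils.readLong(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(v == 0x0123456789ABCDEFL); // true
      }
    }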
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
new file mode 100644
index 0000000..a0006ed
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage;
+
+/**
+ * Helper to convert Exceptions and StackTraces from/to protobuf.
+ * (see ErrorHandling.proto for the internals of the proto messages)
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class ForeignExceptionUtil {
+ private ForeignExceptionUtil() { }
+
+ public static IOException toIOException(final ForeignExceptionMessage eem) {
+ GenericExceptionMessage gem = eem.getGenericException();
+ StackTraceElement[] trace = toStackTrace(gem.getTraceList());
+ RemoteException re = new RemoteException(gem.getClassName(), gem.getMessage());
+ re.setStackTrace(trace);
+ return re.unwrapRemoteException();
+ }
+
+ public static ForeignExceptionMessage toProtoForeignException(String source, Throwable t) {
+ GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder();
+ gemBuilder.setClassName(t.getClass().getName());
+ if (t.getMessage() != null) {
+ gemBuilder.setMessage(t.getMessage());
+ }
+ // set the stack trace, if there is one
+ List<StackTraceElementMessage> stack = toProtoStackTraceElement(t.getStackTrace());
+ if (stack != null) {
+ gemBuilder.addAllTrace(stack);
+ }
+ GenericExceptionMessage payload = gemBuilder.build();
+ ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder();
+ exception.setGenericException(payload).setSource(source);
+ return exception.build();
+ }
+
+ /**
+ * Convert a stack trace to a list of {@link StackTraceElementMessage}s.
+ * @param trace the stack trace to convert to a protobuf message
+ * @return the converted list, or <tt>null</tt> if the passed stack is <tt>null</tt>.
+ */
+ public static List<StackTraceElementMessage> toProtoStackTraceElement(StackTraceElement[] trace) {
+ // if there is no stack trace, ignore it and just return the message
+ if (trace == null) return null;
+ // build the stack trace for the message
+ List<StackTraceElementMessage> pbTrace = new ArrayList<StackTraceElementMessage>(trace.length);
+ for (StackTraceElement elem : trace) {
+ StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
+ stackBuilder.setDeclaringClass(elem.getClassName());
+ if (elem.getFileName() != null) {
+ stackBuilder.setFileName(elem.getFileName());
+ }
+ stackBuilder.setLineNumber(elem.getLineNumber());
+ stackBuilder.setMethodName(elem.getMethodName());
+ pbTrace.add(stackBuilder.build());
+ }
+ return pbTrace;
+ }
+
+ /**
+ * Unwind a serialized list of {@link StackTraceElementMessage}s into an
+ * array of {@link StackTraceElement}s.
+ * @param traceList list that was serialized
+ * @return the deserialized trace, or an empty array if it couldn't be
+ * unwound (e.g. wasn't set on the sender).
+ */
+ public static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
+ if (traceList == null || traceList.size() == 0) {
+ return new StackTraceElement[0]; // empty array
+ }
+ StackTraceElement[] trace = new StackTraceElement[traceList.size()];
+ for (int i = 0; i < traceList.size(); i++) {
+ StackTraceElementMessage elem = traceList.get(i);
+ trace[i] = new StackTraceElement(
+ elem.getDeclaringClass(), elem.getMethodName(),
+ elem.hasFileName() ? elem.getFileName() : null,
+ elem.getLineNumber());
+ }
+ return trace;
+ }
+}
\ No newline at end of file
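The helper is symmetric: toProtoForeignException() captures the class name, message and stack trace, and toIOException() rebuilds a throwable on the receiving side, unwrapping back to the original exception class when possible. A round-trip sketch (illustrative only; the "rs-1" source label is an arbitrary placeholder):

    import java.io.IOException;

    import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage;
    import org.apache.hadoop.hbase.util.ForeignExceptionUtil;

    public class ForeignExceptionSketch {
      public static void main(String[] args) {
        ForeignExceptionMessage msg =
            ForeignExceptionUtil.toProtoForeignException("rs-1", new IOException("boom"));
        IOException rebuilt = ForeignExceptionUtil.toIOException(msg);
        System.out.println(rebuilt.getMessage()); // the original message survives
      }
    }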
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml
new file mode 100644
index 0000000..9683db2
--- /dev/null
+++ b/hbase-procedure/pom.xml
@@ -0,0 +1,181 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>hbase</artifactId>
+ <groupId>org.apache.hbase</groupId>
+ <version>2.0.0-SNAPSHOT</version>
+ <relativePath>..</relativePath>
+ </parent>
+
+ <artifactId>hbase-procedure</artifactId>
+ <name>HBase - Procedure</name>
+ <description>Procedure Framework</description>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-site-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <!-- Make a jar and put the sources in the jar -->
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <!--Make it so assembly:single does nothing in here-->
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>${maven.assembly.version}</version>
+ <configuration>
+ <skipAssembly>true</skipAssembly>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <!-- Always skip the second part executions, since we only run
+ simple unit tests in this module. -->
+ <executions>
+ <execution>
+ <id>secondPartTestsExecution</id>
+ <phase>test</phase>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-common</artifactId>
+ <version>${project.version}</version>
+ <classifier>tests</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-annotations</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-protocol</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ </dependency>
+ </dependencies>
+
+ <profiles>
+ <!-- Profiles for building against different hadoop versions -->
+ <profile>
+ <id>hadoop-1.1</id>
+ <activation>
+ <property>
+ <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+ <!--h1--><name>hadoop.profile</name><value>1.1</value>
+ </property>
+ </activation>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-core</artifactId>
+ </dependency>
+ </dependencies>
+ </profile>
+ <profile>
+ <id>hadoop-1.0</id>
+ <activation>
+ <property>
+ <name>hadoop.profile</name>
+ <value>1.0</value>
+ </property>
+ </activation>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-core</artifactId>
+ </dependency>
+ </dependencies>
+ </profile>
+ <!--
+ profile for building against Hadoop 2.0.0-alpha. Activate using:
+ mvn -Dhadoop.profile=2.0
+ -->
+ <profile>
+ <id>hadoop-2.0</id>
+ <activation>
+ <property>
+ <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+ <!--h2--><name>!hadoop.profile</name>
+ </property>
+ </activation>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ </dependency>
+ </dependencies>
+ </profile>
+ <!--
+ profile for building against Hadoop 3.0.x. Activate using:
+ mvn -Dhadoop.profile=3.0
+ -->
+ <profile>
+ <id>hadoop-3.0</id>
+ <activation>
+ <property>
+ <name>hadoop.profile</name>
+ <value>3.0</value>
+ </property>
+ </activation>
+ <properties>
+ <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ </dependency>
+ </dependencies>
+ </profile>
+ </profiles>
+</project>
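The new module slots into the normal multi-module build, and the hadoop profiles are activated exactly as the pom comments describe; for example (standard Maven options, nothing HBase-specific assumed):

    mvn -pl hbase-procedure -am package
    mvn -pl hbase-procedure -am -Dhadoop.profile=3.0 package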
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java
new file mode 100644
index 0000000..1c3be2d
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class OnePhaseProcedure<TEnvironment> extends Procedure<TEnvironment> {
+ // TODO (e.g. used by online snapshots)
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
new file mode 100644
index 0000000..338fcad
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -0,0 +1,680 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+
+/**
+ * Base Procedure class responsible for handling the procedure metadata,
+ * e.g. state, startTime, lastUpdate, stack-indexes, ...
+ *
+ * execute() is called each time the procedure is executed.
+ * It may be called multiple times in case of failure and restart, so the
+ * code must be idempotent.
+ * The return value is a set of sub-procedures, or null if the procedure
+ * doesn't have sub-procedures. Once the sub-procedures are successfully
+ * completed, the execute() method is called again; you should think of it
+ * as a stack:
+ * -> step 1
+ * ---> step 2
+ * -> step 1
+ *
+ * rollback() is called when the procedure or one of its sub-procedures has
+ * failed. The rollback step is supposed to clean up the resources created
+ * during the execute() step. In case of failure and restart, rollback() may
+ * be called multiple times, so the code must be idempotent.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
+ // unchanged after initialization
+ private String owner = null;
+ private Long parentProcId = null;
+ private Long procId = null;
+ private long startTime;
+
+ // runtime state, updated every operation
+ private ProcedureState state = ProcedureState.INITIALIZING;
+ private Integer timeout = null;
+ private int[] stackIndexes = null;
+ private int childrenLatch = 0;
+ private long lastUpdate;
+
+ private RemoteProcedureException exception = null;
+ private byte[] result = null;
+
+ /**
+ * The main code of the procedure. It must be idempotent since execute()
+ * may be called multiple times in case of machine failure in the middle
+ * of the execution.
+ * @return a set of sub-procedures or null if there is nothing else to execute.
+ */
+ protected abstract Procedure[] execute(TEnvironment env)
+ throws ProcedureYieldException;
+
+ /**
+ * The code to undo what was done by the execute() code.
+ * It is called when the procedure or one of its sub-procedures has failed or an
+ * abort was requested. It should clean up all the resources created by
+ * the execute() call. The implementation must be idempotent since rollback()
+ * may be called multiple times in case of machine failure in the middle
+ * of the execution.
+ * @throws IOException temporary failure, the rollback will retry later
+ */
+ protected abstract void rollback(TEnvironment env)
+ throws IOException;
+
+ /**
+ * The abort() call is asynchronous and each procedure must decide how to deal
+ * with it, if it wants to be abortable. The simplest implementation
+ * is to have an AtomicBoolean set in the abort() method, which the execute()
+ * code then checks (a sketch of this pattern follows ProcedureAbortedException.java below).
+ * abort() may be called multiple times from the client, so the implementation
+ * must be idempotent.
+ *
+ * NOTE: abort() is not like Thread.interrupt(); it is just a notification
+ * that lets the procedure implementor decide where to abort, to avoid leaks and
+ * have better control over what was executed and what was not.
+ */
+ protected abstract boolean abort(TEnvironment env);
+
+ /**
+ * The user-level code of the procedure may have some state to
+ * persist (e.g. input arguments) to be able to resume on failure.
+ * @param stream the stream that will contain the user serialized data
+ */
+ protected abstract void serializeStateData(final OutputStream stream)
+ throws IOException;
+
+ /**
+ * Called on store load to allow the user to decode the previously serialized
+ * state.
+ * @param stream the stream that contains the user serialized data
+ */
+ protected abstract void deserializeStateData(final InputStream stream)
+ throws IOException;
+
+ /**
+ * The user should override this method and try to take a lock if necessary.
+ * A lock can be anything, and it is up to the implementor.
+ * Example: in our Master we can execute requests in parallel for different tables;
+ * create t1 and create t2 can be executed at the same time, while
+ * anything else on t1/t2 is queued waiting for that specific table create to happen.
+ *
+ * @return true if the lock was acquired and false otherwise
+ */
+ protected boolean acquireLock(final TEnvironment env) {
+ return true;
+ }
+
+ /**
+ * The user should override this method and release the lock if necessary.
+ */
+ protected void releaseLock(final TEnvironment env) {
+ // no-op
+ }
+
+ /**
+ * Called when the procedure is loaded for replay.
+ * The procedure implementor may use this method to perform some quick
+ * operation before replay, e.g. failing the procedure if the state on
+ * replay may be unknown.
+ */
+ protected void beforeReplay(final TEnvironment env) {
+ // no-op
+ }
+
+ /**
+ * Called when the procedure is marked as completed (success or rollback).
+ * The procedure implementor may use this method to cleanup in-memory states.
+ * This operation will not be retried on failure.
+ */
+ protected void completionCleanup(final TEnvironment env) {
+ // no-op
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ toStringClassDetails(sb);
+
+ if (procId != null) {
+ sb.append(" id=");
+ sb.append(getProcId());
+ }
+
+ if (hasParent()) {
+ sb.append(" parent=");
+ sb.append(getParentProcId());
+ }
+
+ if (hasOwner()) {
+ sb.append(" owner=");
+ sb.append(getOwner());
+ }
+
+ sb.append(" state=");
+ sb.append(getState());
+ return sb.toString();
+ }
+
+ /**
+ * Extend the toString() information with the procedure details
+ * e.g. className and parameters
+ * @param builder the string builder to use to append the proc specific information
+ */
+ protected void toStringClassDetails(StringBuilder builder) {
+ builder.append(getClass().getName());
+ }
+
+ /**
+ * @return the serialized result if any, otherwise null
+ */
+ public byte[] getResult() {
+ return result;
+ }
+
+ /**
+ * The procedure may leave a "result" on completion.
+ * @param result the serialized result that will be passed to the client
+ */
+ protected void setResult(final byte[] result) {
+ this.result = result;
+ }
+
+ public long getProcId() {
+ return procId;
+ }
+
+ public boolean hasParent() {
+ return parentProcId != null;
+ }
+
+ public boolean hasException() {
+ return exception != null;
+ }
+
+ public boolean hasTimeout() {
+ return timeout != null;
+ }
+
+ public long getParentProcId() {
+ return parentProcId;
+ }
+
+ /**
+ * @return true if the procedure has failed.
+ * true may mean failed but not yet rolled back, or failed and rolled back.
+ */
+ public synchronized boolean isFailed() {
+ return exception != null || state == ProcedureState.ROLLEDBACK;
+ }
+
+ /**
+ * @return true if the procedure is finished successfully.
+ */
+ public synchronized boolean isSuccess() {
+ return state == ProcedureState.FINISHED && exception == null;
+ }
+
+ /**
+ * @return true if the procedure is finished. The procedure may have completed
+ * successfully or failed and been rolled back.
+ */
+ public synchronized boolean isFinished() {
+ switch (state) {
+ case ROLLEDBACK:
+ return true;
+ case FINISHED:
+ return exception == null;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ /**
+ * @return true if the procedure is waiting for a child to finish or for an external event.
+ */
+ public synchronized boolean isWaiting() {
+ switch (state) {
+ case WAITING:
+ case WAITING_TIMEOUT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ public synchronized RemoteProcedureException getException() {
+ return exception;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public synchronized long getLastUpdate() {
+ return lastUpdate;
+ }
+
+ public synchronized long elapsedTime() {
+ return lastUpdate - startTime;
+ }
+
+ /**
+ * @param timeout timeout in msec
+ */
+ protected void setTimeout(final int timeout) {
+ this.timeout = timeout;
+ }
+
+ /**
+ * @return the timeout in msec
+ */
+ public int getTimeout() {
+ return timeout;
+ }
+
+ /**
+ * @return the remaining time before the timeout
+ */
+ public long getTimeRemaining() {
+ return Math.max(0, timeout - (EnvironmentEdgeManager.currentTime() - startTime));
+ }
+
+ protected void setOwner(final String owner) {
+ this.owner = StringUtils.isEmpty(owner) ? null : owner;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public boolean hasOwner() {
+ return owner != null;
+ }
+
+ @VisibleForTesting
+ @InterfaceAudience.Private
+ protected synchronized void setState(final ProcedureState state) {
+ this.state = state;
+ updateTimestamp();
+ }
+
+ @InterfaceAudience.Private
+ protected synchronized ProcedureState getState() {
+ return state;
+ }
+
+ protected void setFailure(final String source, final Throwable cause) {
+ setFailure(new RemoteProcedureException(source, cause));
+ }
+
+ protected synchronized void setFailure(final RemoteProcedureException exception) {
+ this.exception = exception;
+ if (!isFinished()) {
+ setState(ProcedureState.FINISHED);
+ }
+ }
+
+ protected void setAbortFailure(final String source, final String msg) {
+ setFailure(source, new ProcedureAbortedException(msg));
+ }
+
+ @InterfaceAudience.Private
+ protected synchronized boolean setTimeoutFailure() {
+ if (state == ProcedureState.WAITING_TIMEOUT) {
+ long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate;
+ setFailure("ProcedureExecutor", new TimeoutException(
+ "Operation timed out after " + StringUtils.humanTimeDiff(timeDiff)));
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Called by the ProcedureExecutor to assign the ID to the newly created procedure.
+ */
+ @VisibleForTesting
+ @InterfaceAudience.Private
+ protected void setProcId(final long procId) {
+ this.procId = procId;
+ this.startTime = EnvironmentEdgeManager.currentTime();
+ setState(ProcedureState.RUNNABLE);
+ }
+
+ /**
+ * Called by the ProcedureExecutor to assign the parent to the newly created procedure.
+ */
+ @InterfaceAudience.Private
+ protected void setParentProcId(final long parentProcId) {
+ this.parentProcId = parentProcId;
+ }
+
+ /**
+ * Internal method called by the ProcedureExecutor that starts the
+ * user-level code execute().
+ */
+ @InterfaceAudience.Private
+ protected Procedure[] doExecute(final TEnvironment env)
+ throws ProcedureYieldException {
+ try {
+ updateTimestamp();
+ return execute(env);
+ } finally {
+ updateTimestamp();
+ }
+ }
+
+ /**
+ * Internal method called by the ProcedureExecutor that starts the
+ * user-level code rollback().
+ */
+ @InterfaceAudience.Private
+ protected void doRollback(final TEnvironment env) throws IOException {
+ try {
+ updateTimestamp();
+ rollback(env);
+ } finally {
+ updateTimestamp();
+ }
+ }
+
+ /**
+ * Called on store load to initialize the Procedure internals after
+ * the creation/deserialization.
+ */
+ @InterfaceAudience.Private
+ protected void setStartTime(final long startTime) {
+ this.startTime = startTime;
+ }
+
+ /**
+ * Called on store load to initialize the Procedure internals after
+ * the creation/deserialization.
+ */
+ private synchronized void setLastUpdate(final long lastUpdate) {
+ this.lastUpdate = lastUpdate;
+ }
+
+ protected synchronized void updateTimestamp() {
+ this.lastUpdate = EnvironmentEdgeManager.currentTime();
+ }
+
+ /**
+ * Called by the ProcedureExecutor on procedure-load to restore the latch state
+ */
+ @InterfaceAudience.Private
+ protected synchronized void setChildrenLatch(final int numChildren) {
+ this.childrenLatch = numChildren;
+ }
+
+ /**
+ * Called by the ProcedureExecutor on procedure-load to restore the latch state
+ */
+ @InterfaceAudience.Private
+ protected synchronized void incChildrenLatch() {
+ // TODO: can this be inferred from the stack? I think so...
+ this.childrenLatch++;
+ }
+
+ /**
+ * Called by the ProcedureExecutor to notify that one of the sub-procedures
+ * has completed.
+ */
+ @InterfaceAudience.Private
+ protected synchronized boolean childrenCountDown() {
+ assert childrenLatch > 0;
+ return --childrenLatch == 0;
+ }
+
+ /**
+ * Called by the RootProcedureState on procedure execution.
+ * Each procedure stores its stack-index positions.
+ */
+ @InterfaceAudience.Private
+ protected synchronized void addStackIndex(final int index) {
+ if (stackIndexes == null) {
+ stackIndexes = new int[] { index };
+ } else {
+ int count = stackIndexes.length;
+ stackIndexes = Arrays.copyOf(stackIndexes, count + 1);
+ stackIndexes[count] = index;
+ }
+ }
+
+ @InterfaceAudience.Private
+ protected synchronized boolean removeStackIndex() {
+ if (stackIndexes.length > 1) {
+ stackIndexes = Arrays.copyOf(stackIndexes, stackIndexes.length - 1);
+ return false;
+ } else {
+ stackIndexes = null;
+ return true;
+ }
+ }
+
+ /**
+ * Called on store load to initialize the Procedure internals after
+ * the creation/deserialization.
+ */
+ @InterfaceAudience.Private
+ protected synchronized void setStackIndexes(final List<Integer> stackIndexes) {
+ this.stackIndexes = new int[stackIndexes.size()];
+ for (int i = 0; i < this.stackIndexes.length; ++i) {
+ this.stackIndexes[i] = stackIndexes.get(i);
+ }
+ }
+
+ @InterfaceAudience.Private
+ protected synchronized boolean wasExecuted() {
+ return stackIndexes != null;
+ }
+
+ @InterfaceAudience.Private
+ protected synchronized int[] getStackIndexes() {
+ return stackIndexes;
+ }
+
+ @Override
+ public int compareTo(final Procedure other) {
+ long diff = getProcId() - other.getProcId();
+ return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
+ }
+
+ /*
+ * Helper to lookup the root Procedure ID given a specified procedure.
+ */
+ @InterfaceAudience.Private
+ protected static Long getRootProcedureId(final Map<Long, Procedure> procedures, Procedure proc) {
+ while (proc.hasParent()) {
+ proc = procedures.get(proc.getParentProcId());
+ if (proc == null) return null;
+ }
+ return proc.getProcId();
+ }
+
+ protected static Procedure newInstance(final String className) throws IOException {
+ try {
+ Class<?> clazz = Class.forName(className);
+ if (!Modifier.isPublic(clazz.getModifiers())) {
+ throw new Exception("the " + clazz + " class is not public");
+ }
+
+ Constructor<?> ctor = clazz.getConstructor();
+ assert ctor != null : "no constructor found";
+ if (!Modifier.isPublic(ctor.getModifiers())) {
+ throw new Exception("the " + clazz + " constructor is not public");
+ }
+ return (Procedure)ctor.newInstance();
+ } catch (Exception e) {
+ throw new IOException("The procedure class " + className +
+ " must be accessible and have an empty constructor", e);
+ }
+ }
+
+ protected static void validateClass(final Procedure proc) throws IOException {
+ try {
+ Class<?> clazz = proc.getClass();
+ if (!Modifier.isPublic(clazz.getModifiers())) {
+ throw new Exception("the " + clazz + " class is not public");
+ }
+
+ Constructor<?> ctor = clazz.getConstructor();
+ assert ctor != null;
+ if (!Modifier.isPublic(ctor.getModifiers())) {
+ throw new Exception("the " + clazz + " constructor is not public");
+ }
+ } catch (Exception e) {
+ throw new IOException("The procedure class " + proc.getClass().getName() +
+ " must be accessible and have an empty constructor", e);
+ }
+ }
+
+ /**
+ * Helper to convert the procedure to protobuf.
+ * Used by ProcedureStore implementations.
+ */
+ @InterfaceAudience.Private
+ public static ProcedureProtos.Procedure convert(final Procedure proc)
+ throws IOException {
+ Preconditions.checkArgument(proc != null);
+ validateClass(proc);
+
+ ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder()
+ .setClassName(proc.getClass().getName())
+ .setProcId(proc.getProcId())
+ .setState(proc.getState())
+ .setStartTime(proc.getStartTime())
+ .setLastUpdate(proc.getLastUpdate());
+
+ if (proc.hasParent()) {
+ builder.setParentId(proc.getParentProcId());
+ }
+
+ if (proc.hasTimeout()) {
+ builder.setTimeout(proc.getTimeout());
+ }
+
+ if (proc.hasOwner()) {
+ builder.setOwner(proc.getOwner());
+ }
+
+ int[] stackIds = proc.getStackIndexes();
+ if (stackIds != null) {
+ for (int i = 0; i < stackIds.length; ++i) {
+ builder.addStackId(stackIds[i]);
+ }
+ }
+
+ if (proc.hasException()) {
+ RemoteProcedureException exception = proc.getException();
+ builder.setException(
+ RemoteProcedureException.toProto(exception.getSource(), exception.getCause()));
+ }
+
+ byte[] result = proc.getResult();
+ if (result != null) {
+ builder.setResult(ByteStringer.wrap(result));
+ }
+
+ ByteString.Output stateStream = ByteString.newOutput();
+ proc.serializeStateData(stateStream);
+ if (stateStream.size() > 0) {
+ builder.setStateData(stateStream.toByteString());
+ }
+
+ return builder.build();
+ }
+
+ /**
+ * Helper to convert the protobuf procedure.
+ * Used by ProcedureStore implementations.
+ *
+ * TODO: OPTIMIZATION: some of the fields never change during execution
+ * (e.g. className, procId, parentId, ...).
+ * We can split them into 'data' and 'state', and the store
+ * may take advantage of that by storing the data only on insert().
+ */
+ @InterfaceAudience.Private
+ public static Procedure convert(final ProcedureProtos.Procedure proto)
+ throws IOException {
+ // Procedure from class name
+ Procedure proc = Procedure.newInstance(proto.getClassName());
+
+ // set fields
+ proc.setProcId(proto.getProcId());
+ proc.setState(proto.getState());
+ proc.setStartTime(proto.getStartTime());
+ proc.setLastUpdate(proto.getLastUpdate());
+
+ if (proto.hasParentId()) {
+ proc.setParentProcId(proto.getParentId());
+ }
+
+ if (proto.hasOwner()) {
+ proc.setOwner(proto.getOwner());
+ }
+
+ if (proto.hasTimeout()) {
+ proc.setTimeout(proto.getTimeout());
+ }
+
+ if (proto.getStackIdCount() > 0) {
+ proc.setStackIndexes(proto.getStackIdList());
+ }
+
+ if (proto.hasException()) {
+ assert proc.getState() == ProcedureState.FINISHED ||
+ proc.getState() == ProcedureState.ROLLEDBACK :
+ "The procedure must be failed (waiting to rollback) or rolledback";
+ proc.setFailure(RemoteProcedureException.fromProto(proto.getException()));
+ }
+
+ if (proto.hasResult()) {
+ proc.setResult(proto.getResult().toByteArray());
+ }
+
+ // we want to call deserialize even when the stream is empty, mainly for testing.
+ proc.deserializeStateData(proto.getStateData().newInput());
+
+ return proc;
+ }
+}
\ No newline at end of file
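To make the contract above concrete: a minimal Procedure subclass, sketched here for illustration only (CreateDirProcedure and its Void environment are hypothetical, not part of this commit), must keep execute() and rollback() idempotent and persist its input arguments through serializeStateData()/deserializeStateData() so it can resume after a crash:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CreateDirProcedure extends Procedure<Void> {
  private String path; // input argument, persisted so the procedure can resume on failure

  public CreateDirProcedure() {} // public empty constructor, required by Procedure.newInstance()

  public CreateDirProcedure(final String path) {
    this.path = path;
  }

  @Override
  protected Procedure[] execute(final Void env) {
    // idempotent: mkdirs() on an existing directory is a no-op
    new java.io.File(path).mkdirs();
    return null; // no sub-procedures, nothing else to execute
  }

  @Override
  protected void rollback(final Void env) {
    // idempotent cleanup of what execute() created
    new java.io.File(path).delete();
  }

  @Override
  protected boolean abort(final Void env) {
    return false; // this procedure does not support abort
  }

  @Override
  protected void serializeStateData(final OutputStream stream) throws IOException {
    new DataOutputStream(stream).writeUTF(path);
  }

  @Override
  protected void deserializeStateData(final InputStream stream) throws IOException {
    path = new DataInputStream(stream).readUTF();
  }
}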
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java
new file mode 100644
index 0000000..2e409cf
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Thrown when a procedure is aborted
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ProcedureAbortedException extends ProcedureException {
+ /** default constructor */
+ public ProcedureAbortedException() {
+ super();
+ }
+
+ /**
+ * Constructor
+ * @param s message
+ */
+ public ProcedureAbortedException(String s) {
+ super(s);
+ }
+}
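As a rough sketch of the AtomicBoolean pattern described in the Procedure.abort() javadoc above (the class name and Void environment are hypothetical, not part of this commit): abort() just raises a flag, and execute() checks it and marks the procedure as failed via setAbortFailure(), which wraps the message in a ProcedureAbortedException and triggers the rollback path:

import java.io.InputStream;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicBoolean;

public class AbortableSleepProcedure extends Procedure<Void> {
  private final AtomicBoolean aborted = new AtomicBoolean(false);

  @Override
  protected boolean abort(final Void env) {
    // idempotent: may be called multiple times from the client
    aborted.set(true);
    return true; // the abort notification was accepted
  }

  @Override
  protected Procedure[] execute(final Void env) {
    if (aborted.get()) {
      // marks the procedure as failed with a ProcedureAbortedException
      setAbortFailure(getClass().getSimpleName(), "abort requested");
      return null;
    }
    // ... do (a slice of) the real work here ...
    return null;
  }

  @Override
  protected void rollback(final Void env) {
    // nothing to undo in this sketch
  }

  @Override
  protected void serializeStateData(final OutputStream stream) {}

  @Override
  protected void deserializeStateData(final InputStream stream) {}
}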
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java
new file mode 100644
index 0000000..9f922b1
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ProcedureException extends IOException {
+ /** default constructor */
+ public ProcedureException() {
+ super();
+ }
+
+ /**
+ * Constructor
+ * @param s message
+ */
+ public ProcedureException(String s) {
+ super(s);
+ }
+
+ public ProcedureException(Throwable t) {
+ super(t);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
new file mode 100644
index 0000000..2982058
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -0,0 +1,1077 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.HashSet;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue;
+import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Thread pool that executes the submitted procedures.
+ * The executor has an associated ProcedureStore.
+ * Each operation is logged, and on restart the pending procedures are resumed.
+ *
+ * Unless the Procedure code throws an error (e.g. invalid user input)
+ * the procedure will complete (at some point in time). On restart the pending
+ * procedures are resumed, and the ones that failed will be rolled back.
+ *
+ * The user can add procedures to the executor via submitProcedure(proc),
+ * check for the finished state via isFinished(procId),
+ * and get the result via getResult(procId).
+ * (A client-side usage sketch follows this file.)
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ProcedureExecutor<TEnvironment> {
+ private static final Log LOG = LogFactory.getLog(ProcedureExecutor.class);
+
+ Testing testing = null;
+ public static class Testing {
+ protected boolean killBeforeStoreUpdate = false;
+ protected boolean toggleKillBeforeStoreUpdate = false;
+
+ protected boolean shouldKillBeforeStoreUpdate() {
+ final boolean kill = this.killBeforeStoreUpdate;
+ if (this.toggleKillBeforeStoreUpdate) {
+ this.killBeforeStoreUpdate = !kill;
+ LOG.warn("Toggle Kill before store update to: " + this.killBeforeStoreUpdate);
+ }
+ return kill;
+ }
+ }
+
+ public interface ProcedureExecutorListener {
+ void procedureLoaded(long procId);
+ void procedureAdded(long procId);
+ void procedureFinished(long procId);
+ }
+
+ /**
+ * Used by the TimeoutBlockingQueue to get the timeout interval of the procedure
+ */
+ private static class ProcedureTimeoutRetriever implements TimeoutRetriever<Procedure> {
+ @Override
+ public long getTimeout(Procedure proc) {
+ return proc.getTimeRemaining();
+ }
+
+ @Override
+ public TimeUnit getTimeUnit(Procedure proc) {
+ return TimeUnit.MILLISECONDS;
+ }
+ }
+
+ /**
+ * Internal cleaner that removes the completed procedure results after a TTL.
+ * NOTE: This is a special case handled in timeoutLoop().
+ *
+ * Since the client code looks more or less like:
+ * procId = master.doOperation()
+ * while (master.getProcResult(procId) == ProcInProgress);
+ * the master should not throw away the proc result as soon as the procedure is done,
+ * but should wait for a result request from the client (see executor.removeResult(procId)).
+ * The client will call something like master.isProcDone() or master.getProcResult(),
+ * which will return the result/state to the client and mark the completed
+ * proc as ready to delete. Note that the client may not receive the response from
+ * the master (e.g. master failover), so if we delay the real deletion of
+ * the proc result a bit, the client will be able to get the result on the next try.
+ */
+ private static class CompletedProcedureCleaner<TEnvironment> extends Procedure<TEnvironment> {
+ private static final Log LOG = LogFactory.getLog(CompletedProcedureCleaner.class);
+
+ private static final String CLEANER_INTERVAL_CONF_KEY = "hbase.procedure.cleaner.interval";
+ private static final int DEFAULT_CLEANER_INTERVAL = 30 * 1000; // 30sec
+
+ private static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl";
+ private static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min
+
+ private static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl";
+ private static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min
+
+ private final Map<Long, ProcedureResult> completed;
+ private final ProcedureStore store;
+ private final Configuration conf;
+
+ public CompletedProcedureCleaner(final Configuration conf, final ProcedureStore store,
+ final Map<Long, ProcedureResult> completedMap) {
+ // set the timeout interval that triggers the periodic-procedure
+ setTimeout(conf.getInt(CLEANER_INTERVAL_CONF_KEY, DEFAULT_CLEANER_INTERVAL));
+ this.completed = completedMap;
+ this.store = store;
+ this.conf = conf;
+ }
+
+ public void periodicExecute(final TEnvironment env) {
+ if (completed.isEmpty()) {
+ LOG.debug("no completed procedures to cleanup");
+ return;
+ }
+
+ final long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL);
+ final long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL);
+
+ long now = EnvironmentEdgeManager.currentTime();
+ Iterator<Map.Entry<Long, ProcedureResult>> it = completed.entrySet().iterator();
+ while (it.hasNext() && store.isRunning()) {
+ Map.Entry<Long, ProcedureResult> entry = it.next();
+ ProcedureResult result = entry.getValue();
+
+ // TODO: Select TTL based on Procedure type
+ if ((result.hasClientAckTime() && (now - result.getClientAckTime()) >= evictAckTtl) ||
+ (now - result.getLastUpdate()) >= evictTtl) {
+ LOG.debug("Evict completed procedure " + entry.getKey());
+ store.delete(entry.getKey());
+ it.remove();
+ }
+ }
+ }
+
+ @Override
+ protected Procedure[] execute(final TEnvironment env) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected void rollback(final TEnvironment env) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected boolean abort(final TEnvironment env) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ /**
+ * Maps the procId returned by submitProcedure(), the Root-ProcID, to the ProcedureResult.
+ * Once a Root-Procedure completes (success or failure), the result will be added to this map.
+ * The user of ProcedureExecutor should call getResult(procId) to get the result.
+ */
+ private final ConcurrentHashMap<Long, ProcedureResult> completed =
+ new ConcurrentHashMap<Long, ProcedureResult>();
+
+ /**
+ * Maps the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState.
+ * The RootProcedureState contains the execution stack of the Root-Procedure.
+ * It is added to the map by submitProcedure() and removed on procedure completion.
+ */
+ private final ConcurrentHashMap<Long, RootProcedureState> rollbackStack =
+ new ConcurrentHashMap<Long, RootProcedureState>();
+
+ /**
+ * Helper map to lookup the live procedures by ID.
+ * This map contains every procedure: root-procedures and subprocedures.
+ */
+ private final ConcurrentHashMap<Long, Procedure> procedures =
+ new ConcurrentHashMap<Long, Procedure>();
+
+ /**
+ * Timeout Queue that contains Procedures in a WAITING_TIMEOUT state
+ * or periodic procedures.
+ */
+ private final TimeoutBlockingQueue<Procedure> waitingTimeout =
+ new TimeoutBlockingQueue<Procedure>(new ProcedureTimeoutRetriever());
+
+ /**
+ * Queue that contains runnable procedures.
+ */
+ private final ProcedureRunnableSet runnables;
+
+ // TODO
+ private final ReentrantLock submitLock = new ReentrantLock();
+ private final AtomicLong lastProcId = new AtomicLong(-1);
+
+ private final CopyOnWriteArrayList<ProcedureExecutorListener> listeners =
+ new CopyOnWriteArrayList<ProcedureExecutorListener>();
+
+ private final AtomicInteger activeExecutorCount = new AtomicInteger(0);
+ private final AtomicBoolean running = new AtomicBoolean(false);
+ private final TEnvironment environment;
+ private final ProcedureStore store;
+ private final Configuration conf;
+
+ private Thread[] threads;
+
+ public ProcedureExecutor(final Configuration conf, final TEnvironment environment,
+ final ProcedureStore store) {
+ this(conf, environment, store, new ProcedureSimpleRunQueue());
+ }
+
+ public ProcedureExecutor(final Configuration conf, final TEnvironment environment,
+ final ProcedureStore store, final ProcedureRunnableSet runqueue) {
+ this.environment = environment;
+ this.runnables = runqueue;
+ this.store = store;
+ this.conf = conf;
+ }
+
+ private List<Map.Entry<Long, RootProcedureState>> load() throws IOException {
+ Preconditions.checkArgument(completed.isEmpty());
+ Preconditions.checkArgument(rollbackStack.isEmpty());
+ Preconditions.checkArgument(procedures.isEmpty());
+ Preconditions.checkArgument(waitingTimeout.isEmpty());
+ Preconditions.checkArgument(runnables.size() == 0);
+
+ // 1. Load the procedures
+ Iterator<Procedure> loader = store.load();
+ if (loader == null) {
+ lastProcId.set(0);
+ return null;
+ }
+
+ long logMaxProcId = 0;
+ int runnablesCount = 0;
+ while (loader.hasNext()) {
+ Procedure proc = loader.next();
+ proc.beforeReplay(getEnvironment());
+ procedures.put(proc.getProcId(), proc);
+ logMaxProcId = Math.max(logMaxProcId, proc.getProcId());
+ LOG.debug("Loading procedure state=" + proc.getState() +
+ " isFailed=" + proc.hasException() + ": " + proc);
+ if (!proc.hasParent() && !proc.isFinished()) {
+ rollbackStack.put(proc.getProcId(), new RootProcedureState());
+ }
+ if (proc.getState() == ProcedureState.RUNNABLE) {
+ runnablesCount++;
+ }
+ }
+ assert lastProcId.get() < 0;
+ lastProcId.set(logMaxProcId);
+
+ // 2. Initialize the stacks
+ TreeSet<Procedure> runnableSet = null;
+ HashSet<Procedure> waitingSet = null;
+ for (final Procedure proc: procedures.values()) {
+ Long rootProcId = getRootProcedureId(proc);
+ if (rootProcId == null) {
+ // The 'proc' was ready to run but the root procedure was rolledback?
+ runnables.addBack(proc);
+ continue;
+ }
+
+ if (!proc.hasParent() && proc.isFinished()) {
+ LOG.debug("The procedure is completed state=" + proc.getState() +
+ " isFailed=" + proc.hasException() + ": " + proc);
+ assert !rollbackStack.containsKey(proc.getProcId());
+ completed.put(proc.getProcId(), newResultFromProcedure(proc));
+ continue;
+ }
+
+ if (proc.hasParent() && !proc.isFinished()) {
+ Procedure parent = procedures.get(proc.getParentProcId());
+ // corrupted procedures are handled later at step 3
+ if (parent != null) {
+ parent.incChildrenLatch();
+ }
+ }
+
+ RootProcedureState procStack = rollbackStack.get(rootProcId);
+ procStack.loadStack(proc);
+
+ switch (proc.getState()) {
+ case RUNNABLE:
+ if (runnableSet == null) {
+ runnableSet = new TreeSet<Procedure>();
+ }
+ runnableSet.add(proc);
+ break;
+ case WAITING_TIMEOUT:
+ if (waitingSet == null) {
+ waitingSet = new HashSet<Procedure>();
+ }
+ waitingSet.add(proc);
+ break;
+ case FINISHED:
+ if (proc.hasException()) {
+ // add the proc to the runnables to perform the rollback
+ runnables.addBack(proc);
+ break;
+ }
+ case ROLLEDBACK:
+ case INITIALIZING:
+ String msg = "Unexpected " + proc.getState() + " state for " + proc;
+ LOG.error(msg);
+ throw new UnsupportedOperationException(msg);
+ default:
+ break;
+ }
+ }
+
+ // 3. Validate the stacks
+ List<Map.Entry<Long, RootProcedureState>> corrupted = null;
+ Iterator<Map.Entry<Long, RootProcedureState>> itStack = rollbackStack.entrySet().iterator();
+ while (itStack.hasNext()) {
+ Map.Entry<Long, RootProcedureState> entry = itStack.next();
+ RootProcedureState procStack = entry.getValue();
+ if (procStack.isValid()) continue;
+
+ for (Procedure proc: procStack.getSubprocedures()) {
+ procedures.remove(proc.getProcId());
+ if (runnableSet != null) runnableSet.remove(proc);
+ if (waitingSet != null) waitingSet.remove(proc);
+ }
+ itStack.remove();
+ if (corrupted == null) {
+ corrupted = new ArrayList<Map.Entry<Long, RootProcedureState>>();
+ }
+ corrupted.add(entry);
+ }
+
+ // 4. Push the runnables
+ if (runnableSet != null) {
+ // TODO: See ProcedureWALFormatReader.readInitEntry() some procedure
+ // may be started way before this stuff.
+ for (Procedure proc: runnableSet) {
+ if (!proc.hasParent()) {
+ sendProcedureLoadedNotification(proc.getProcId());
+ }
+ runnables.addBack(proc);
+ }
+ }
+ return corrupted;
+ }
+
+ public void start(int numThreads) throws IOException {
+ if (running.getAndSet(true)) {
+ LOG.warn("Already running");
+ return;
+ }
+
+ // We have numThreads executor threads + one timer thread used for timing out
+ // procedures and triggering periodic procedures.
+ threads = new Thread[numThreads + 1];
+ LOG.info("Starting procedure executor threads=" + threads.length);
+
+ // Initialize procedures executor
+ for (int i = 0; i < numThreads; ++i) {
+ threads[i] = new Thread("ProcedureExecutorThread-" + i) {
+ @Override
+ public void run() {
+ execLoop();
+ }
+ };
+ }
+
+ // Initialize procedures timeout handler (this is the +1 thread)
+ threads[numThreads] = new Thread("ProcedureExecutorTimeoutThread") {
+ @Override
+ public void run() {
+ timeoutLoop();
+ }
+ };
+
+ // Acquire the store lease.
+ store.recoverLease();
+
+ // TODO: Split in two steps.
+ // TODO: Handle corrupted procedure returned (probably just a WARN)
+ // The first one will make sure that we have the latest id,
+ // so we can start the threads and accept new procedures.
+ // The second step will do the actual load of old procedures.
+ load();
+
+ // Start the executors. Here we must have the lastProcId set.
+ for (int i = 0; i < threads.length; ++i) {
+ threads[i].start();
+ }
+
+ // Add completed cleaner
+ waitingTimeout.add(new CompletedProcedureCleaner(conf, store, completed));
+ }
+
+ public void stop() {
+ if (!running.getAndSet(false)) {
+ return;
+ }
+
+ LOG.info("Stopping the procedure executor");
+ runnables.signalAll();
+ waitingTimeout.signalAll();
+ }
+
+ public void join() {
+ boolean interrupted = false;
+
+ for (int i = 0; i < threads.length; ++i) {
+ try {
+ threads[i].join();
+ } catch (InterruptedException ex) {
+ interrupted = true;
+ }
+ }
+
+ if (interrupted) {
+ Thread.currentThread().interrupt();
+ }
+
+ completed.clear();
+ rollbackStack.clear();
+ procedures.clear();
+ waitingTimeout.clear();
+ runnables.clear();
+ lastProcId.set(-1);
+ }
+
+ public boolean isRunning() {
+ return running.get();
+ }
+
+ /**
+ * @return the number of execution threads.
+ */
+ public int getNumThreads() {
+ return threads == null ? 0 : (threads.length - 1);
+ }
+
+ public int getActiveExecutorCount() {
+ return activeExecutorCount.get();
+ }
+
+ public TEnvironment getEnvironment() {
+ return this.environment;
+ }
+
+ public ProcedureStore getStore() {
+ return this.store;
+ }
+
+ public void registerListener(ProcedureExecutorListener listener) {
+ this.listeners.add(listener);
+ }
+
+ public boolean unregisterListener(ProcedureExecutorListener listener) {
+ return this.listeners.remove(listener);
+ }
+
+ /**
+ * Add a new root-procedure to the executor.
+ * @param proc the new procedure to execute.
+ * @return the procedure id, which can be used to monitor the operation
+ */
+ public long submitProcedure(final Procedure proc) {
+ Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING);
+ Preconditions.checkArgument(isRunning());
+ Preconditions.checkArgument(lastProcId.get() >= 0);
+ Preconditions.checkArgument(!proc.hasParent());
+
+ // Initialize the Procedure ID
+ proc.setProcId(nextProcId());
+
+ // Commit the transaction
+ store.insert(proc, null);
+ LOG.debug("procedure " + proc + " added to the store");
+
+ // Create the rollback stack for the procedure
+ RootProcedureState stack = new RootProcedureState();
+ rollbackStack.put(proc.getProcId(), stack);
+
+ // Submit the new subprocedures
+ assert !procedures.containsKey(proc.getProcId());
+ procedures.put(proc.getProcId(), proc);
+ sendProcedureAddedNotification(proc.getProcId());
+ runnables.addBack(proc);
+ return proc.getProcId();
+ }
+
+ public ProcedureResult getResult(final long procId) {
+ return completed.get(procId);
+ }
+
+ /**
+ * Return true if the procedure is finished.
+ * The state may be "completed successfully" or "failed and rolled back".
+ * Use getResult() to check the state or get the result data.
+ * @param procId the ID of the procedure to check
+ * @return true if the procedure execution is finished, otherwise false.
+ */
+ public boolean isFinished(final long procId) {
+ return completed.containsKey(procId);
+ }
+
+ /**
+ * Return true if the procedure is started.
+ * @param procId the ID of the procedure to check
+ * @return true if the procedure execution is started, otherwise false.
+ */
+ public boolean isStarted(final long procId) {
+ Procedure proc = procedures.get(procId);
+ if (proc == null) {
+ return completed.get(procId) != null;
+ }
+ return proc.wasExecuted();
+ }
+
+ /**
+ * Mark the specified completed procedure as ready to remove.
+ * @param procId the ID of the procedure to remove
+ */
+ public void removeResult(final long procId) {
+ ProcedureResult result = completed.get(procId);
+ if (result == null) {
+ assert !procedures.containsKey(procId) : "procId=" + procId + " is still running";
+ LOG.debug("Procedure procId=" + procId + " already removed by the cleaner");
+ return;
+ }
+
+ // The CompletedProcedureCleaner will take care of deletion, once the TTL is expired.
+ result.setClientAckTime(EnvironmentEdgeManager.currentTime());
+ }
+
+ /**
+ * Send an abort notification to the specified procedure.
+ * Depending on the procedure implementation, the abort can be honored or ignored.
+ * @param procId the procedure to abort
+ * @return true if the procedure exists and has received the abort, otherwise false.
+ */
+ public boolean abort(final long procId) {
+ Procedure proc = procedures.get(procId);
+ if (proc != null) {
+ return proc.abort(getEnvironment());
+ }
+ return false;
+ }
+
+ public Map<Long, ProcedureResult> getResults() {
+ return Collections.unmodifiableMap(completed);
+ }
+
+ public Procedure getProcedure(final long procId) {
+ return procedures.get(procId);
+ }
+
+ protected ProcedureRunnableSet getRunnableSet() {
+ return runnables;
+ }
+
+ /**
+ * Execution loop (N threads):
+ * while the executor is in a running state,
+ * fetch a procedure from the runnables queue and start the execution.
+ */
+ private void execLoop() {
+ while (isRunning()) {
+ Long procId = runnables.poll();
+ Procedure proc = procId != null ? procedures.get(procId) : null;
+ if (proc == null) continue;
+
+ try {
+ activeExecutorCount.incrementAndGet();
+ execLoop(proc);
+ } finally {
+ activeExecutorCount.decrementAndGet();
+ }
+ }
+ }
+
+ private void execLoop(Procedure proc) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("trying to start the execution of " + proc);
+ }
+
+ Long rootProcId = getRootProcedureId(proc);
+ if (rootProcId == null) {
+ // The 'proc' was ready to run but the root procedure was rolledback
+ executeRollback(proc);
+ return;
+ }
+
+ RootProcedureState procStack = rollbackStack.get(rootProcId);
+ if (procStack == null) return;
+
+ do {
+ // Try to acquire the execution
+ if (!procStack.acquire(proc)) {
+ if (procStack.setRollback()) {
+ // we have the 'rollback-lock', so we can start rolling back
+ if (!executeRollback(rootProcId, procStack)) {
+ procStack.unsetRollback();
+ runnables.yield(proc);
+ }
+ } else {
+ // if we can't rollback, it means that some child is still running.
+ // the rollback will be executed after all the children are done.
+ // If the procedure was never executed, remove it and mark it as rolled back.
+ if (!proc.wasExecuted()) {
+ if (!executeRollback(proc)) {
+ runnables.yield(proc);
+ }
+ }
+ }
+ break;
+ }
+
+ // Execute the procedure
+ assert proc.getState() == ProcedureState.RUNNABLE;
+ if (proc.acquireLock(getEnvironment())) {
+ execProcedure(procStack, proc);
+ proc.releaseLock(getEnvironment());
+ } else {
+ runnables.yield(proc);
+ }
+ procStack.release(proc);
+
+ // allows killing the executor before something is stored to the WAL;
+ // useful to test the procedure recovery.
+ if (testing != null && !isRunning()) {
+ break;
+ }
+
+ if (proc.getProcId() == rootProcId && proc.isSuccess()) {
+ // Finalize the procedure state
+ LOG.info("Procedure completed in " +
+ StringUtils.humanTimeDiff(proc.elapsedTime()) + ": " + proc);
+ procedureFinished(proc);
+ break;
+ }
+ } while (procStack.isFailed());
+ }
+
+ private void timeoutLoop() {
+ while (isRunning()) {
+ Procedure proc = waitingTimeout.poll();
+ if (proc == null) continue;
+
+ if (proc.getTimeRemaining() > 100) {
+ // got an early wake, maybe a stop?
+ // re-enqueue the task in case it was not a stop, or was just a signal
+ waitingTimeout.add(proc);
+ continue;
+ }
+
+ // ----------------------------------------------------------------------------
+ // TODO-MAYBE: Should we provide a notification to the store with the
+ // full set of procedures pending and completed to write a compacted
+ // version of the log (in case it is a log)?
+ // In theory no: procedures have a short life, so at some point the store
+ // will have the tracker saying everything is in the last log.
+ // ----------------------------------------------------------------------------
+
+ // The CompletedProcedureCleaner is a special case, and it acts as a chore.
+ // Instead of bringing the Chore class in, we reuse this timeout thread for
+ // this special case.
+ if (proc instanceof CompletedProcedureCleaner) {
+ try {
+ ((CompletedProcedureCleaner)proc).periodicExecute(getEnvironment());
+ } catch (Throwable e) {
+ LOG.error("ignoring CompletedProcedureCleaner exception: " + e.getMessage(), e);
+ }
+ proc.setStartTime(EnvironmentEdgeManager.currentTime());
+ waitingTimeout.add(proc);
+ continue;
+ }
+
+ // The procedure received an "abort-timeout", call abort() and
+ // add the procedure back in the queue for rollback.
+ if (proc.setTimeoutFailure()) {
+ long rootProcId = Procedure.getRootProcedureId(procedures, proc);
+ RootProcedureState procStack = rollbackStack.get(rootProcId);
+ procStack.abort();
+ store.update(proc);
+ runnables.addFront(proc);
+ continue;
+ }
+ }
+ }
+
+ /**
+ * Execute the rollback of the full procedure stack.
+ * Once the procedure is rolled back, the root-procedure will be visible as
+ * finished to the user, and the result will be the fatal exception.
+ */
+ private boolean executeRollback(final long rootProcId, final RootProcedureState procStack) {
+ Procedure rootProc = procedures.get(rootProcId);
+ RemoteProcedureException exception = rootProc.getException();
+ if (exception == null) {
+ exception = procStack.getException();
+ rootProc.setFailure(exception);
+ store.update(rootProc);
+ }
+
+ List<Procedure> subprocStack = procStack.getSubprocedures();
+ assert subprocStack != null : "called rollback with no steps executed rootProc=" + rootProc;
+
+ int stackTail = subprocStack.size();
+ boolean reuseLock = false;
+ while (stackTail --> 0) {
+ final Procedure proc = subprocStack.get(stackTail);
+
+ if (!reuseLock && !proc.acquireLock(getEnvironment())) {
+ // can't take a lock on the procedure, add the root-proc back on the
+ // queue waiting for the lock availability
+ return false;
+ }
+
+ boolean abortRollback = !executeRollback(proc);
+ abortRollback |= !isRunning() || !store.isRunning();
+
+ // If the next procedure is the same as this one
+ // (e.g. StateMachineProcedure reuses the same instance)
+ // we can avoid locking/unlocking at each step
+ reuseLock = stackTail > 0 && (subprocStack.get(stackTail - 1) == proc) && !abortRollback;
+ if (!reuseLock) {
+ proc.releaseLock(getEnvironment());
+ }
+
+ // allows killing the executor before something is stored to the WAL;
+ // useful to test the procedure recovery.
+ if (abortRollback) {
+ return false;
+ }
+
+ subprocStack.remove(stackTail);
+ }
+
+ // Finalize the procedure state
+ LOG.info("Rolledback procedure " + rootProc +
+ " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()) +
+ " exception=" + exception.getMessage());
+ procedureFinished(rootProc);
+ return true;
+ }
+
+ /**
+ * Execute the rollback of the procedure step.
+ * It updates the store with the new state (stack index),
+ * or removes the procedure completely in case it is a child.
+ */
+ private boolean executeRollback(final Procedure proc) {
+ try {
+ proc.doRollback(getEnvironment());
+ } catch (IOException e) {
+ LOG.debug("rollback attempt failed for " + proc, e);
+ return false;
+ } catch (Throwable e) {
+ // Catch NullPointerExceptions or similar errors...
+ LOG.fatal("CODE-BUG: uncatched runtime exception for procedure: " + proc, e);
+ }
+
+ // allows killing the executor before something is stored to the WAL;
+ // useful to test the procedure recovery.
+ if (testing != null && testing.shouldKillBeforeStoreUpdate()) {
+ LOG.debug("TESTING: Kill before store update");
+ stop();
+ return false;
+ }
+
+ if (proc.removeStackIndex()) {
+ proc.setState(ProcedureState.ROLLEDBACK);
+ if (proc.hasParent()) {
+ store.delete(proc.getProcId());
+ procedures.remove(proc.getProcId());
+ } else {
+ store.update(proc);
+ }
+ } else {
+ store.update(proc);
+ }
+ return true;
+ }
+
+ /**
+ * Executes the specified procedure
+ * - calls the doExecute() of the procedure
+ * - if the procedure execution didn't fail (e.g. invalid user input)
+ * - ...and returned subprocedures
+ * - the subprocedures are initialized.
+ * - the subprocedures are added to the store
+ * - the subprocedures are added to the runnable queue
+ * - the procedure is now in a WAITING state, waiting for the subprocedures to complete
+ * - ...if there are no subprocedures
+ * - the procedure completed successfully
+ * - if there is a parent (WAITING)
+ * - the parent state will be set to RUNNABLE
+ * - in case of failure
+ * - the store is updated with the new state
+ * - the executor (caller of this method) will start the rollback of the procedure
+ */
+ private void execProcedure(final RootProcedureState procStack, final Procedure procedure) {
+ Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
+
+ // Execute the procedure
+ boolean reExecute = false;
+ Procedure[] subprocs = null;
+ do {
+ reExecute = false;
+ try {
+ subprocs = procedure.doExecute(getEnvironment());
+ if (subprocs != null && subprocs.length == 0) {
+ subprocs = null;
+ }
+ } catch (ProcedureYieldException e) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("yield procedure: " + procedure);
+ }
+ runnables.yield(procedure);
+ return;
+ } catch (Throwable e) {
+ // Catch NullPointerExceptions or similar errors...
+ String msg = "CODE-BUG: uncatched runtime exception for procedure: " + procedure;
+ LOG.error(msg, e);
+ procedure.setFailure(new RemoteProcedureException(msg, e));
+ }
+
+ if (!procedure.isFailed()) {
+ if (subprocs != null) {
+ if (subprocs.length == 1 && subprocs[0] == procedure) {
+ // quick-shortcut for a state machine like procedure
+ subprocs = null;
+ reExecute = true;
+ } else {
+ // yield the current procedure, and make the subprocedure runnable
+ for (int i = 0; i < subprocs.length; ++i) {
+ Procedure subproc = subprocs[i];
+ if (subproc == null) {
+ String msg = "subproc[" + i + "] is null, aborting the procedure";
+ procedure.setFailure(new RemoteProcedureException(msg,
+ new IllegalArgumentException(msg)));
+ subprocs = null;
+ break;
+ }
+
+ assert subproc.getState() == ProcedureState.INITIALIZING;
+ subproc.setParentProcId(procedure.getProcId());
+ subproc.setProcId(nextProcId());
+ }
+
+ if (!procedure.isFailed()) {
+ procedure.setChildrenLatch(subprocs.length);
+ switch (procedure.getState()) {
+ case RUNNABLE:
+ procedure.setState(ProcedureState.WAITING);
+ break;
+ case WAITING_TIMEOUT:
+ waitingTimeout.add(procedure);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+ waitingTimeout.add(procedure);
+ } else {
+ // No subtask, so we are done
+ procedure.setState(ProcedureState.FINISHED);
+ }
+ }
+
+ // Add the procedure to the stack
+ procStack.addRollbackStep(procedure);
+
+ // allows killing the executor before something is stored to the WAL;
+ // useful to test the procedure recovery.
+ if (testing != null && testing.shouldKillBeforeStoreUpdate()) {
+ LOG.debug("TESTING: Kill before store update");
+ stop();
+ return;
+ }
+
+ // Commit the transaction
+ if (subprocs != null && !procedure.isFailed()) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("store add " + procedure + " children " + Arrays.toString(subprocs));
+ }
+ store.insert(procedure, subprocs);
+ } else {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("store update " + procedure);
+ }
+ store.update(procedure);
+ }
+
+ // if the store is not running we are aborting
+ if (!store.isRunning()) {
+ return;
+ }
+
+ assert (reExecute && subprocs == null) || !reExecute;
+ } while (reExecute);
+
+ // Submit the new subprocedures
+ if (subprocs != null && !procedure.isFailed()) {
+ for (int i = 0; i < subprocs.length; ++i) {
+ Procedure subproc = subprocs[i];
+ assert !procedures.containsKey(subproc.getProcId());
+ procedures.put(subproc.getProcId(), subproc);
+ runnables.addFront(subproc);
+ }
+ }
+
+ if (procedure.isFinished() && procedure.hasParent()) {
+ Procedure parent = procedures.get(procedure.getParentProcId());
+ if (parent == null) {
+ assert procStack.isRollingback();
+ return;
+ }
+
+ // If this procedure is the last child awake the parent procedure
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(parent + " child is done: " + procedure);
+ }
+ if (parent.childrenCountDown() && parent.getState() == ProcedureState.WAITING) {
+ parent.setState(ProcedureState.RUNNABLE);
+ store.update(parent);
+ runnables.addFront(parent);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(parent + " all the children finished their work, resume.");
+ }
+ return;
+ }
+ }
+ }
+
+ private void sendProcedureLoadedNotification(final long procId) {
+ if (!this.listeners.isEmpty()) {
+ for (ProcedureExecutorListener listener: this.listeners) {
+ try {
+ listener.procedureLoaded(procId);
+ } catch (Throwable e) {
+ LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e);
+ }
+ }
+ }
+ }
+
+ private void sendProcedureAddedNotification(final long procId) {
+ if (!this.listeners.isEmpty()) {
+ for (ProcedureExecutorListener listener: this.listeners) {
+ try {
+ listener.procedureAdded(procId);
+ } catch (Throwable e) {
+ LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e);
+ }
+ }
+ }
+ }
+
+ private void sendProcedureFinishedNotification(final long procId) {
+ if (!this.listeners.isEmpty()) {
+ for (ProcedureExecutorListener listener: this.listeners) {
+ try {
+ listener.procedureFinished(procId);
+ } catch (Throwable e) {
+ LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e);
+ }
+ }
+ }
+ }
+
+ private long nextProcId() {
+ long procId = lastProcId.incrementAndGet();
+ if (procId < 0) {
+ while (!lastProcId.compareAndSet(procId, 0)) {
+ procId = lastProcId.get();
+ if (procId >= 0)
+ break;
+ }
+ while (procedures.containsKey(procId)) {
+ procId = lastProcId.incrementAndGet();
+ }
+ }
+ return procId;
+ }
+
+ private Long getRootProcedureId(Procedure proc) {
+ return Procedure.getRootProcedureId(procedures, proc);
+ }
+
+ private void procedureFinished(final Procedure proc) {
+ // call the procedure completion cleanup handler
+ try {
+ proc.completionCleanup(getEnvironment());
+ } catch (Throwable e) {
+ // Catch NullPointerExceptions or similar errors...
+ LOG.error("CODE-BUG: uncatched runtime exception for procedure: " + proc, e);
+ }
+
+ // update the executor internal state maps
+ completed.put(proc.getProcId(), newResultFromProcedure(proc));
+ rollbackStack.remove(proc.getProcId());
+ procedures.remove(proc.getProcId());
+
+ // call the runnableSet completion cleanup handler
+ try {
+ runnables.completionCleanup(proc);
+ } catch (Throwable e) {
+ // Catch NullPointerExceptions or similar errors...
+ LOG.error("CODE-BUG: uncatched runtime exception for runnableSet: " + runnables, e);
+ }
+
+ // Notify the listeners
+ sendProcedureFinishedNotification(proc.getProcId());
+ }
+
+ public Pair<ProcedureResult, Procedure> getResultOrProcedure(final long procId) {
+ ProcedureResult result = completed.get(procId);
+ Procedure proc = null;
+ if (result == null) {
+ proc = procedures.get(procId);
+ if (proc == null) {
+ result = completed.get(procId);
+ }
+ }
+ return new Pair<ProcedureResult, Procedure>(result, proc);
+ }
+
+ private static ProcedureResult newResultFromProcedure(final Procedure proc) {
+ if (proc.isFailed()) {
+ return new ProcedureResult(proc.getStartTime(), proc.getLastUpdate(), proc.getException());
+ }
+ return new ProcedureResult(proc.getStartTime(), proc.getLastUpdate(), proc.getResult());
+ }
+}
\ No newline at end of file
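Putting the executor lifecycle together, a minimal client-side sketch (illustration only: the store wiring and the CreateDirProcedure from the earlier sketch are hypothetical; a real ProcedureStore implementation, such as a WAL-backed store, would be plugged in):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;

public class ProcedureExecutorDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    ProcedureStore store = createStore(); // hypothetical factory for a ProcedureStore impl

    ProcedureExecutor<Void> executor = new ProcedureExecutor<Void>(conf, null, store);
    executor.start(4); // 4 worker threads, plus the internal timeout thread

    long procId = executor.submitProcedure(new CreateDirProcedure("/tmp/demo"));

    // poll until the root procedure is finished (completed successfully or rolled back)
    while (!executor.isFinished(procId)) {
      Thread.sleep(100);
    }

    ProcedureResult result = executor.getResult(procId);
    // ... inspect the result/exception here ...

    // ack the result so the CompletedProcedureCleaner can evict it after the acked TTL
    executor.removeResult(procId);

    executor.stop();
    executor.join();
  }

  private static ProcedureStore createStore() {
    throw new UnsupportedOperationException("wire in a real ProcedureStore here");
  }
}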
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
new file mode 100644
index 0000000..03d007a
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.Map;
+
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * This class is a container of queues that allows selecting a queue
+ * in a round-robin fashion, taking the priority of each queue into account.
+ *
+ * The quantum is how many consecutive poll() calls return the same object.
+ * e.g. with quantum 1 and objects A and B you get: A B A B
+ * e.g. with quantum 2 and objects A and B you get: A A B B A A B B
+ * An object's share of polls per round is its priority * quantum.
+ *
+ * Example:
+ * - three queues (A, B, C) with priorities (1, 1, 2)
+ * - The first poll() will return A
+ * - The second poll() will return B
+ * - The third and fourth poll() will return C
+ * - and so on, round after round.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ProcedureFairRunQueues<TKey, TQueue extends ProcedureFairRunQueues.FairObject> {
+ private ConcurrentSkipListMap<TKey, TQueue> objMap =
+ new ConcurrentSkipListMap<TKey, TQueue>();
+
+ private final ReentrantLock lock = new ReentrantLock();
+ private final int quantum;
+
+ private Map.Entry<TKey, TQueue> current = null;
+ private int currentQuantum = 0;
+
+ public interface FairObject {
+ boolean isAvailable();
+ int getPriority();
+ }
+
+ /**
+ * @param quantum how many consecutive poll() calls return the same object.
+ */
+ public ProcedureFairRunQueues(final int quantum) {
+ this.quantum = quantum;
+ }
+
+ public TQueue get(final TKey key) {
+ return objMap.get(key);
+ }
+
+ public TQueue add(final TKey key, final TQueue queue) {
+ TQueue oldq = objMap.putIfAbsent(key, queue);
+ return oldq != null ? oldq : queue;
+ }
+
+ public TQueue remove(final TKey key) {
+ TQueue queue = objMap.get(key);
+ if (queue != null) {
+ lock.lock();
+ try {
+ queue = objMap.remove(key);
+ if (current != null && queue == current.getValue()) {
+ currentQuantum = 0;
+ current = null;
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+ return queue;
+ }
+
+ public void clear() {
+ lock.lock();
+ try {
+ current = null;
+ objMap.clear();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * @return the next available item if present
+ */
+ public TQueue poll() {
+ lock.lock();
+ try {
+ TQueue queue;
+ if (currentQuantum == 0) {
+ if (nextObject() == null) {
+ // nothing here
+ return null;
+ }
+
+ queue = current.getValue();
+ currentQuantum = calculateQuantum(queue) - 1;
+ } else {
+ currentQuantum--;
+ queue = current.getValue();
+ }
+
+ if (!queue.isAvailable()) {
+ Map.Entry<TKey, TQueue> last = current;
+ // Try the next one
+ do {
+ if (nextObject() == null) {
+ return null;
+ }
+ } while (current.getValue() != last.getValue() && !current.getValue().isAvailable());
+
+ queue = current.getValue();
+ currentQuantum = calculateQuantum(queue) - 1;
+ }
+
+ return queue;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append('{');
+ for (Map.Entry<TKey, TQueue> entry: objMap.entrySet()) {
+ builder.append(entry.getKey());
+ builder.append(':');
+ builder.append(entry.getValue());
+ }
+ builder.append('}');
+ return builder.toString();
+ }
+
+ private Map.Entry<TKey, TQueue> nextObject() {
+ Map.Entry<TKey, TQueue> next = null;
+
+ // If we already have a key, try the next one
+ if (current != null) {
+ next = objMap.higherEntry(current.getKey());
+ }
+
+ // if there is no higher key, go back to the first
+ current = (next != null) ? next : objMap.firstEntry();
+ return current;
+ }
+
+ private int calculateQuantum(final TQueue fairObject) {
+ // TODO
+ return Math.max(1, fairObject.getPriority() * quantum);
+ }
+}
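The class above can be exercised with a toy FairObject implementation to see
the round-robin-with-priority behavior the javadoc describes. A short usage
sketch, assuming the hbase-procedure module is on the classpath (DemoQueue is
an illustrative stand-in, not an HBase type): with quantum 1 and priorities
A=1, B=2, poll() yields A B B A B B ...

    import org.apache.hadoop.hbase.procedure2.ProcedureFairRunQueues;

    public class FairQueuesDemo {
      static class DemoQueue implements ProcedureFairRunQueues.FairObject {
        private final String name;
        private final int priority;

        DemoQueue(String name, int priority) {
          this.name = name;
          this.priority = priority;
        }

        @Override public boolean isAvailable() { return true; }
        @Override public int getPriority() { return priority; }
        @Override public String toString() { return name; }
      }

      public static void main(String[] args) {
        ProcedureFairRunQueues<String, DemoQueue> queues =
            new ProcedureFairRunQueues<String, DemoQueue>(1);
        queues.add("A", new DemoQueue("A", 1));
        queues.add("B", new DemoQueue("B", 2));
        for (int i = 0; i < 6; i++) {
          System.out.print(queues.poll() + " "); // A B B A B B
        }
      }
    }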
[18/50] [abbrv] hbase git commit: HBASE-13211 Procedure V2 - master
Enable/Disable table (Stephen Yuan Jiang)
Posted by jm...@apache.org.
HBASE-13211 Procedure V2 - master Enable/Disable table (Stephen Yuan Jiang)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/57c70f0a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/57c70f0a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/57c70f0a
Branch: refs/heads/hbase-11339
Commit: 57c70f0af8399d782fda05a6274227c26b811482
Parents: 4ae8b8c
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 21:52:02 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:43 2015 +0100
----------------------------------------------------------------------
.../generated/MasterProcedureProtos.java | 2253 +++++++++++++++++-
.../src/main/protobuf/MasterProcedure.proto | 30 +
.../org/apache/hadoop/hbase/master/HMaster.java | 41 +-
.../master/procedure/DisableTableProcedure.java | 540 +++++
.../master/procedure/EnableTableProcedure.java | 582 +++++
.../procedure/TableProcedureInterface.java | 6 +-
.../MasterProcedureTestingUtility.java | 14 +
.../procedure/TestDisableTableProcedure.java | 182 ++
.../procedure/TestEnableTableProcedure.java | 193 ++
.../TestMasterFailoverWithProcedures.java | 76 +
10 files changed, 3796 insertions(+), 121 deletions(-)
----------------------------------------------------------------------
[20/50] [abbrv] hbase git commit: HBASE-13210 Procedure V2 - master
Modify table (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-protocol/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index 4e9b05e..97d1af6 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -58,6 +58,23 @@ message CreateTableStateData {
repeated RegionInfo region_info = 3;
}
+enum ModifyTableState {
+ MODIFY_TABLE_PREPARE = 1;
+ MODIFY_TABLE_PRE_OPERATION = 2;
+ MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3;
+ MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4;
+ MODIFY_TABLE_DELETE_FS_LAYOUT = 5;
+ MODIFY_TABLE_POST_OPERATION = 6;
+ MODIFY_TABLE_REOPEN_ALL_REGIONS = 7;
+}
+
+message ModifyTableStateData {
+ required UserInformation user_info = 1;
+ optional TableSchema unmodified_table_schema = 2;
+ required TableSchema modified_table_schema = 3;
+ required bool delete_column_family_in_modify = 4;
+}
+
enum DeleteTableState {
DELETE_TABLE_PRE_OPERATION = 1;
DELETE_TABLE_REMOVE_FROM_META = 2;
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index e2e600c..ba739b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -18,10 +18,6 @@
*/
package org.apache.hadoop.hbase.master;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@@ -43,10 +39,11 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Service;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -93,7 +90,6 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
-import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
@@ -102,15 +98,17 @@ import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@ -146,6 +144,11 @@ import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Service;
+
/**
* HMaster is the "master server" for HBase. An HBase cluster has one active
* master. If many masters are started, all compete. Whichever wins goes on to
@@ -1728,8 +1731,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (cpHost != null) {
cpHost.preModifyTable(tableName, descriptor);
}
+
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
- new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
+
+ // Execute the operation synchronously - wait until the operation completes before continuing.
+ long procId = this.procedureExecutor.submitProcedure(
+ new ModifyTableProcedure(procedureExecutor.getEnvironment(), descriptor));
+
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+
if (cpHost != null) {
cpHost.postModifyTable(tableName, descriptor);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 78e4c11..de28cdc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -583,10 +583,12 @@ public class MasterFileSystem {
Path familyDir = new Path(tableDir,
new Path(region.getEncodedName(), Bytes.toString(familyName)));
if (fs.delete(familyDir, true) == false) {
- throw new IOException("Could not delete family "
- + Bytes.toString(familyName) + " from FileSystem for region "
- + region.getRegionNameAsString() + "(" + region.getEncodedName()
- + ")");
+ if (fs.exists(familyDir)) {
+ throw new IOException("Could not delete family "
+ + Bytes.toString(familyName) + " from FileSystem for region "
+ + region.getRegionNameAsString() + "(" + region.getEncodedName()
+ + ")");
+ }
}
}
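The MasterFileSystem change above makes the family delete idempotent: a
false return from fs.delete() is only an error if the directory still
exists, so a retried procedure step does not fail on a directory that a
previous attempt already removed. A standalone sketch of the same pattern
against the local filesystem (java.io.File here stands in for the HDFS
FileSystem API):

    import java.io.File;
    import java.io.IOException;

    public class IdempotentDeleteSketch {
      // Only treat a failed delete as an error if the path still exists;
      // an earlier (possibly retried) attempt may already have removed it.
      static void deleteFamilyDir(File dir) throws IOException {
        if (!dir.delete() && dir.exists()) {
          throw new IOException("Could not delete " + dir);
        }
      }

      public static void main(String[] args) throws IOException {
        File dir = new File(System.getProperty("java.io.tmpdir"), "family-demo");
        dir.mkdirs();
        deleteFamilyDir(dir); // removes the directory
        deleteFamilyDir(dir); // already gone: a no-op, not an error
      }
    }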
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
new file mode 100644
index 0000000..c6ff1b6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkReOpen;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Helper class for schema change procedures
+ */
+@InterfaceAudience.Private
+public final class MasterDDLOperationHelper {
+ private static final Log LOG = LogFactory.getLog(MasterDDLOperationHelper.class);
+
+ private MasterDDLOperationHelper() {}
+
+ /**
+ * Check whether online schema change is allowed from config
+ **/
+ public static boolean isOnlineSchemaChangeAllowed(final MasterProcedureEnv env) {
+ return env.getMasterServices().getConfiguration()
+ .getBoolean("hbase.online.schema.update.enable", false);
+ }
+
+ /**
+ * Check whether a table is modifiable: it exists and is either offline,
+ * or online with the online schema change config set
+ * @param env MasterProcedureEnv
+ * @param tableName name of the table
+ * @throws IOException
+ */
+ public static void checkTableModifiable(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ // Checks whether the table exists
+ if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ throw new TableNotFoundException(tableName);
+ }
+
+ // We only execute this procedure with table online if online schema change config is set.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(tableName, TableState.State.DISABLED)
+ && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
+ throw new TableNotDisabledException(tableName);
+ }
+ }
+
+ /**
+ * Remove the column family from the file system
+ **/
+ public static void deleteColumnFamilyFromFileSystem(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ List<HRegionInfo> regionInfoList,
+ final byte[] familyName) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName);
+ }
+ if (regionInfoList == null) {
+ regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName);
+ }
+ for (HRegionInfo hri : regionInfoList) {
+ // Delete the family directory in FS for all the regions one by one
+ mfs.deleteFamilyFromFS(hri, familyName);
+ }
+ }
+
+ /**
+ * Reopen all regions from a table after a schema change operation.
+ **/
+ public static boolean reOpenAllRegions(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final List<HRegionInfo> regionInfoList) throws IOException {
+ boolean done = false;
+ LOG.info("Bucketing regions by region server...");
+ List<HRegionLocation> regionLocations = null;
+ Connection connection = env.getMasterServices().getConnection();
+ try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+ regionLocations = locator.getAllRegionLocations();
+ }
+ // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
+ NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
+ for (HRegionLocation location : regionLocations) {
+ hri2Sn.put(location.getRegionInfo(), location.getServerName());
+ }
+ TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
+ List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
+ for (HRegionInfo hri : regionInfoList) {
+ ServerName sn = hri2Sn.get(hri);
+ // Skip the offlined split parent region
+ // See HBASE-4578 for more information.
+ if (null == sn) {
+ LOG.info("Skip " + hri);
+ continue;
+ }
+ if (!serverToRegions.containsKey(sn)) {
+ LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
+ serverToRegions.put(sn, hriList);
+ }
+ reRegions.add(hri);
+ serverToRegions.get(sn).add(hri);
+ }
+
+ LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size()
+ + " region servers.");
+ AssignmentManager am = env.getMasterServices().getAssignmentManager();
+ am.setRegionsToReopen(reRegions);
+ BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am);
+ while (true) {
+ try {
+ if (bulkReopen.bulkReOpen()) {
+ done = true;
+ break;
+ } else {
+ LOG.warn("Timeout before reopening all regions");
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Reopen was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ break;
+ }
+ }
+ return done;
+ }
+}
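reOpenAllRegions() above first buckets regions by the server hosting them,
skipping regions with no known location (offlined split parents). A
standalone sketch of that grouping idiom with plain String keys instead of
HRegionInfo/ServerName:

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class BucketByServerSketch {
      public static void main(String[] args) {
        // region -> hosting server; null models an offlined split parent.
        Map<String, String> regionToServer = new HashMap<String, String>();
        regionToServer.put("region-1", "server-a");
        regionToServer.put("region-2", "server-b");
        regionToServer.put("region-3", "server-a");
        regionToServer.put("region-4", null);

        Map<String, List<String>> serverToRegions =
            new TreeMap<String, List<String>>();
        for (Map.Entry<String, String> e : regionToServer.entrySet()) {
          String server = e.getValue();
          if (server == null) {
            continue; // skip regions with no known location
          }
          if (!serverToRegions.containsKey(server)) {
            serverToRegions.put(server, new LinkedList<String>());
          }
          serverToRegions.get(server).add(e.getKey());
        }
        System.out.println(serverToRegions);
      }
    }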
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
new file mode 100644
index 0000000..8ccdaa4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -0,0 +1,512 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class ModifyTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, ModifyTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private HTableDescriptor unmodifiedHTableDescriptor = null;
+ private HTableDescriptor modifiedHTableDescriptor;
+ private UserGroupInformation user;
+ private boolean deleteColumnFamilyInModify;
+
+ private List<HRegionInfo> regionInfoList;
+ private Boolean traceEnabled = null;
+
+ public ModifyTableProcedure() {
+ initialize();
+ }
+
+ public ModifyTableProcedure(
+ final MasterProcedureEnv env,
+ final HTableDescriptor htd) throws IOException {
+ initialize();
+ this.modifiedHTableDescriptor = htd;
+ this.user = env.getRequestUser().getUGI();
+ }
+
+ private void initialize() {
+ this.unmodifiedHTableDescriptor = null;
+ this.regionInfoList = null;
+ this.traceEnabled = null;
+ this.deleteColumnFamilyInModify = false;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case MODIFY_TABLE_PREPARE:
+ prepareModify(env);
+ setNextState(ModifyTableState.MODIFY_TABLE_PRE_OPERATION);
+ break;
+ case MODIFY_TABLE_PRE_OPERATION:
+ preModify(env, state);
+ setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR);
+ break;
+ case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR:
+ updateTableDescriptor(env);
+ setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN);
+ break;
+ case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
+ updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
+ if (deleteColumnFamilyInModify) {
+ setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
+ } else {
+ setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
+ }
+ break;
+ case MODIFY_TABLE_DELETE_FS_LAYOUT:
+ deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
+ setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
+ break;
+ case MODIFY_TABLE_POST_OPERATION:
+ postModify(env, state);
+ setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS);
+ break;
+ case MODIFY_TABLE_REOPEN_ALL_REGIONS:
+ reOpenAllRegionsIfTableIsOnline(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ if (!isRollbackSupported(state)) {
+ // We reached a state that cannot be rolled back. We just need to keep retrying.
+ LOG.warn("Error trying to modify table=" + getTableName() + " state=" + state, e);
+ } else {
+ LOG.error("Error trying to modify table=" + getTableName() + " state=" + state, e);
+ setFailure("master-modify-table", e);
+ }
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case MODIFY_TABLE_REOPEN_ALL_REGIONS:
+ break; // Nothing to undo.
+ case MODIFY_TABLE_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to un-modify?
+ break;
+ case MODIFY_TABLE_DELETE_FS_LAYOUT:
+ // Once we reach this state we cannot roll back, as it is tricky to undelete
+ // the deleted files. We are not supposed to reach here; throw an exception so
+ // that we know there is a code bug to investigate.
+ assert deleteColumnFamilyInModify;
+ throw new UnsupportedOperationException(this + " rollback of state=" + state
+ + " is unsupported.");
+ case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
+ // Undo the replica column update.
+ updateReplicaColumnsIfNeeded(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
+ break;
+ case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR:
+ restoreTableDescriptor(env);
+ break;
+ case MODIFY_TABLE_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to un-modify?
+ break;
+ case MODIFY_TABLE_PREPARE:
+ break; // Nothing to undo.
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ LOG.warn("Fail trying to rollback modify table=" + getTableName() + " state=" + state, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected ModifyTableState getState(final int stateId) {
+ return ModifyTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final ModifyTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected ModifyTableState getInitialState() {
+ return ModifyTableState.MODIFY_TABLE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(final ModifyTableState state) {
+ if (aborted.get() && isRollbackSupported(state)) {
+ setAbortFailure("modify-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ getTableName(),
+ EventType.C_M_MODIFY_TABLE.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.ModifyTableStateData.Builder modifyTableMsg =
+ MasterProcedureProtos.ModifyTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setModifiedTableSchema(modifiedHTableDescriptor.convert())
+ .setDeleteColumnFamilyInModify(deleteColumnFamilyInModify);
+
+ if (unmodifiedHTableDescriptor != null) {
+ modifyTableMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert());
+ }
+
+ modifyTableMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.ModifyTableStateData modifyTableMsg =
+ MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo());
+ modifiedHTableDescriptor = HTableDescriptor.convert(modifyTableMsg.getModifiedTableSchema());
+ deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify();
+
+ if (modifyTableMsg.hasUnmodifiedTableSchema()) {
+ unmodifiedHTableDescriptor =
+ HTableDescriptor.convert(modifyTableMsg.getUnmodifiedTableSchema());
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return modifiedHTableDescriptor.getTableName();
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Check conditions before any real action of modifying a table.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareModify(final MasterProcedureEnv env) throws IOException {
+ // Checks whether the table exists
+ if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) {
+ throw new TableNotFoundException(getTableName());
+ }
+
+ // In order to update the descriptor, we need to retrieve the old descriptor for comparison.
+ this.unmodifiedHTableDescriptor =
+ env.getMasterServices().getTableDescriptors().get(getTableName());
+
+ if (env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ // We only execute this procedure with table online if online schema change config is set.
+ if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
+ throw new TableNotDisabledException(getTableName());
+ }
+
+ if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor
+ .getRegionReplication()) {
+ throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
+ }
+ }
+
+ // Find out whether all column families in unmodifiedHTableDescriptor also exists in
+ // the modifiedHTableDescriptor. This is to determine whether we are safe to rollback.
+ final Set<byte[]> oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> newFamilies = modifiedHTableDescriptor.getFamiliesKeys();
+ for (byte[] familyName : oldFamilies) {
+ if (!newFamilies.contains(familyName)) {
+ this.deleteColumnFamilyInModify = true;
+ break;
+ }
+ }
+ }
+
+ /**
+ * Action before modifying table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preModify(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Update descriptor
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
+ }
+
+ /**
+ * Undo the descriptor change (for rollback)
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+
+ // delete any new column families from the modifiedHTableDescriptor.
+ deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
+
+ // Make sure regions are opened after table descriptor is updated.
+ reOpenAllRegionsIfTableIsOnline(env);
+ }
+
+ /**
+ * Removes from HDFS the families that are no longer present in the new table descriptor.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void deleteFromFs(final MasterProcedureEnv env,
+ final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor)
+ throws IOException {
+ final Set<byte[]> oldFamilies = oldHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> newFamilies = newHTableDescriptor.getFamiliesKeys();
+ for (byte[] familyName : oldFamilies) {
+ if (!newFamilies.contains(familyName)) {
+ MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(
+ env,
+ getTableName(),
+ getRegionInfoList(env),
+ familyName);
+ }
+ }
+ }
+
+ /**
+ * Update replica column families if necessary.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void updateReplicaColumnsIfNeeded(
+ final MasterProcedureEnv env,
+ final HTableDescriptor oldHTableDescriptor,
+ final HTableDescriptor newHTableDescriptor) throws IOException {
+ final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
+ final int newReplicaCount = newHTableDescriptor.getRegionReplication();
+
+ if (newReplicaCount < oldReplicaCount) {
+ Set<byte[]> tableRows = new HashSet<byte[]>();
+ Connection connection = env.getMasterServices().getConnection();
+ Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName());
+ scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+
+ try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
+ ResultScanner resScanner = metaTable.getScanner(scan);
+ for (Result result : resScanner) {
+ tableRows.add(result.getRow());
+ }
+ MetaTableAccessor.removeRegionReplicasFromMeta(
+ tableRows,
+ newReplicaCount,
+ oldReplicaCount - newReplicaCount,
+ connection);
+ }
+ }
+
+ // Setup replication for region replicas if needed
+ if (newReplicaCount > 1 && oldReplicaCount <= 1) {
+ ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
+ }
+ }
+
+ /**
+ * Action after modifying table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postModify(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Last action from the procedure - executed when online schema change is supported.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
+ // This operation only runs when the table is enabled.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ return;
+ }
+
+ if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) {
+ LOG.info("Completed modify table operation on table " + getTableName());
+ } else {
+ LOG.warn("Error on reopening the regions on table " + getTableName());
+ }
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled whether the trace is enabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case MODIFY_TABLE_PRE_OPERATION:
+ cpHost.preModifyTableHandler(getTableName(), modifiedHTableDescriptor);
+ break;
+ case MODIFY_TABLE_POST_OPERATION:
+ cpHost.postModifyTableHandler(getTableName(), modifiedHTableDescriptor);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+
+ /*
+ * Check whether we are in a state that can be rolled back
+ */
+ private boolean isRollbackSupported(final ModifyTableState state) {
+ if (deleteColumnFamilyInModify) {
+ switch (state) {
+ case MODIFY_TABLE_DELETE_FS_LAYOUT:
+ case MODIFY_TABLE_POST_OPERATION:
+ case MODIFY_TABLE_REOPEN_ALL_REGIONS:
+ // It is not safe to rollback if we reach to these states.
+ return false;
+ default:
+ break;
+ }
+ }
+ return true;
+ }
+
+ private List<HRegionInfo> getRegionInfoList(final MasterProcedureEnv env) throws IOException {
+ if (regionInfoList == null) {
+ regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ }
+ return regionInfoList;
+ }
+}
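A short sketch of how a caller drives this procedure, mirroring the
HMaster.modifyTable() change earlier in this commit; it assumes the classes
from the diffs above are on the classpath and that
ProcedureSyncWait.waitForProcedureToComplete() may throw IOException:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
    import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
    import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

    public final class ModifyTableSketch {
      private ModifyTableSketch() {}

      // Submit the procedure, then block until it completes (or fails).
      static void modifySynchronously(
          final ProcedureExecutor<MasterProcedureEnv> executor,
          final HTableDescriptor descriptor) throws IOException {
        long procId = executor.submitProcedure(
            new ModifyTableProcedure(executor.getEnvironment(), descriptor));
        ProcedureSyncWait.waitForProcedureToComplete(executor, procId);
      }
    }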
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
new file mode 100644
index 0000000..af29338
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
@@ -0,0 +1,403 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestModifyTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestModifyTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout=60000)
+ public void testModifyTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // Modify the table descriptor
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+
+ // Test 1: Modify 1 property
+ long newMaxFileSize = htd.getMaxFileSize() * 2;
+ htd.setMaxFileSize(newMaxFileSize);
+ htd.setRegionReplication(3);
+
+ long procId1 = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
+
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newMaxFileSize, currentHtd.getMaxFileSize());
+
+ // Test 2: Modify multiple properties
+ boolean newReadOnlyOption = !htd.isReadOnly();
+ long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2;
+ htd.setReadOnly(newReadOnlyOption);
+ htd.setMemStoreFlushSize(newMemStoreFlushSize);
+
+ long procId2 = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newReadOnlyOption, currentHtd.isReadOnly());
+ assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize());
+ }
+
+ @Test(timeout = 60000)
+ public void testModifyTableAddCF() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyTableAddCF");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1");
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(1, currentHtd.getFamiliesKeys().size());
+
+ // Test 1: Modify the table descriptor online
+ String cf2 = "cf2";
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd.addFamily(new HColumnDescriptor(cf2));
+
+ long procId = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+ assertTrue(currentHtd.hasFamily(cf2.getBytes()));
+
+ // Test 2: Modify the table descriptor offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ String cf3 = "cf3";
+ HTableDescriptor htd2 =
+ new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd2.addFamily(new HColumnDescriptor(cf3));
+
+ long procId2 =
+ ProcedureTestingUtility.submitAndWait(procExec,
+ new ModifyTableProcedure(procExec.getEnvironment(), htd2));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertTrue(currentHtd.hasFamily(cf3.getBytes()));
+ assertEquals(3, currentHtd.getFamiliesKeys().size());
+ }
+
+ @Test(timeout = 60000)
+ public void testModifyTableDeleteCF() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyTableAddCF");
+ final String cf2 = "cf2";
+ final String cf3 = "cf3";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf2, cf3);
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(3, currentHtd.getFamiliesKeys().size());
+
+ // Test 1: Modify the table descriptor
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd.removeFamily(cf2.getBytes());
+
+ long procId = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+ assertFalse(currentHtd.hasFamily(cf2.getBytes()));
+
+ // Test 2: Modify the table descriptor offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+
+ HTableDescriptor htd2 =
+ new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd2.removeFamily(cf3.getBytes());
+
+ long procId2 =
+ ProcedureTestingUtility.submitAndWait(procExec,
+ new ModifyTableProcedure(procExec.getEnvironment(), htd2));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(1, currentHtd.getFamiliesKeys().size());
+ assertFalse(currentHtd.hasFamily(cf3.getBytes()));
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
+ final String cf2 = "cf2";
+ final String cf3 = "cf3";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1", cf3);
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Modify multiple properties of the table.
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(cf2));
+ htd.removeFamily(cf3.getBytes());
+ htd.setRegionReplication(3);
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // Validate descriptor
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+
+ // cf2 should be added and cf3 should be removed
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, false, "cf1", cf2);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+ final String cf2 = "cf2";
+ final String cf3 = "cf3";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1", cf3);
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Modify multiple properties of the table.
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(cf2));
+ htd.removeFamily(cf3.getBytes());
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ ModifyTableState.values());
+
+ // Validate descriptor
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+ assertTrue(currentHtd.hasFamily(cf2.getBytes()));
+ assertFalse(currentHtd.hasFamily(cf3.getBytes()));
+
+ // cf2 should be added and cf3 should be removed
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, "cf1", cf2);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+ final String familyName = "cf2";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1");
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(familyName));
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and rollback the step twice
+ int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // cf2 should not be present
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, "cf1");
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOffline");
+ final String familyName = "cf2";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(familyName));
+ htd.setRegionReplication(3);
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and rollback the step twice
+ int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // cf2 should not be present
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, "cf1");
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
+ final String familyToAddName = "cf2";
+ final String familyToRemove = "cf1";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, familyToRemove);
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd.setCompactionEnabled(!htd.isCompactionEnabled());
+ htd.addFamily(new HColumnDescriptor(familyToAddName));
+ htd.removeFamily(familyToRemove.getBytes());
+ htd.setRegionReplication(3);
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT we should not trigger the rollback.
+ // NOTE: the 5 (the MODIFY_TABLE_DELETE_FS_LAYOUT step plus one) is hardcoded,
+ // so you have to revisit this test whenever a new step is added.
+ int numberOfSteps = 5;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // "cf2" should be added and "cf1" should be removed
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, false, familyToAddName);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
[21/50] [abbrv] hbase git commit: HBASE-13210 Procedure V2 - master
Modify table (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/7f538336/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 6d1694a..98260c1 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -127,6 +127,133 @@ public final class MasterProcedureProtos {
}
/**
+ * Protobuf enum {@code ModifyTableState}
+ */
+ public enum ModifyTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>MODIFY_TABLE_PREPARE = 1;</code>
+ */
+ MODIFY_TABLE_PREPARE(0, 1),
+ /**
+ * <code>MODIFY_TABLE_PRE_OPERATION = 2;</code>
+ */
+ MODIFY_TABLE_PRE_OPERATION(1, 2),
+ /**
+ * <code>MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR(2, 3),
+ /**
+ * <code>MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4;</code>
+ */
+ MODIFY_TABLE_REMOVE_REPLICA_COLUMN(3, 4),
+ /**
+ * <code>MODIFY_TABLE_DELETE_FS_LAYOUT = 5;</code>
+ */
+ MODIFY_TABLE_DELETE_FS_LAYOUT(4, 5),
+ /**
+ * <code>MODIFY_TABLE_POST_OPERATION = 6;</code>
+ */
+ MODIFY_TABLE_POST_OPERATION(5, 6),
+ /**
+ * <code>MODIFY_TABLE_REOPEN_ALL_REGIONS = 7;</code>
+ */
+ MODIFY_TABLE_REOPEN_ALL_REGIONS(6, 7),
+ ;
+
+ /**
+ * <code>MODIFY_TABLE_PREPARE = 1;</code>
+ */
+ public static final int MODIFY_TABLE_PREPARE_VALUE = 1;
+ /**
+ * <code>MODIFY_TABLE_PRE_OPERATION = 2;</code>
+ */
+ public static final int MODIFY_TABLE_PRE_OPERATION_VALUE = 2;
+ /**
+ * <code>MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ public static final int MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR_VALUE = 3;
+ /**
+ * <code>MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4;</code>
+ */
+ public static final int MODIFY_TABLE_REMOVE_REPLICA_COLUMN_VALUE = 4;
+ /**
+ * <code>MODIFY_TABLE_DELETE_FS_LAYOUT = 5;</code>
+ */
+ public static final int MODIFY_TABLE_DELETE_FS_LAYOUT_VALUE = 5;
+ /**
+ * <code>MODIFY_TABLE_POST_OPERATION = 6;</code>
+ */
+ public static final int MODIFY_TABLE_POST_OPERATION_VALUE = 6;
+ /**
+ * <code>MODIFY_TABLE_REOPEN_ALL_REGIONS = 7;</code>
+ */
+ public static final int MODIFY_TABLE_REOPEN_ALL_REGIONS_VALUE = 7;
+
+
+ public final int getNumber() { return value; }
+
+ public static ModifyTableState valueOf(int value) {
+ switch (value) {
+ case 1: return MODIFY_TABLE_PREPARE;
+ case 2: return MODIFY_TABLE_PRE_OPERATION;
+ case 3: return MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR;
+ case 4: return MODIFY_TABLE_REMOVE_REPLICA_COLUMN;
+ case 5: return MODIFY_TABLE_DELETE_FS_LAYOUT;
+ case 6: return MODIFY_TABLE_POST_OPERATION;
+ case 7: return MODIFY_TABLE_REOPEN_ALL_REGIONS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ModifyTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ModifyTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ModifyTableState>() {
+ public ModifyTableState findValueByNumber(int number) {
+ return ModifyTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(1);
+ }
+
+ private static final ModifyTableState[] VALUES = values();
+
+ public static ModifyTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private ModifyTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:ModifyTableState)
+ }
+
+ /**
* Protobuf enum {@code DeleteTableState}
*/
public enum DeleteTableState
@@ -219,7 +346,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(1);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
}
private static final DeleteTableState[] VALUES = values();
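The one-line change above is a knock-on effect of declaring ModifyTableState in
MasterProcedureProtos.proto: enum descriptors are exposed in declaration order,
so ModifyTableState now occupies index 1 and DeleteTableState shifts from index
1 to index 2. A hedged sanity check of that ordering, using only accessors that
appear in this diff (the class name here is hypothetical):

import com.google.protobuf.Descriptors.EnumDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;

public class DescriptorOrderCheck {
  public static void main(String[] args) {
    // Enum descriptors follow declaration order in the .proto file, so the
    // regenerated getDescriptor() lookups use get(1) and get(2) respectively.
    EnumDescriptor modify = MasterProcedureProtos.getDescriptor().getEnumTypes().get(1);
    EnumDescriptor delete = MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
    System.out.println(modify.getName()); // ModifyTableState
    System.out.println(delete.getName()); // DeleteTableState
  }
}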
@@ -1402,7 +1529,7 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(class_scope:CreateTableStateData)
}
- public interface DeleteTableStateDataOrBuilder
+ public interface ModifyTableStateDataOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .UserInformation user_info = 1;
@@ -1419,64 +1546,63 @@ public final class MasterProcedureProtos {
*/
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
- // required .TableName table_name = 2;
+ // optional .TableSchema unmodified_table_schema = 2;
/**
- * <code>required .TableName table_name = 2;</code>
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
*/
- boolean hasTableName();
+ boolean hasUnmodifiedTableSchema();
/**
- * <code>required .TableName table_name = 2;</code>
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema();
/**
- * <code>required .TableName table_name = 2;</code>
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder();
- // repeated .RegionInfo region_info = 3;
+ // required .TableSchema modified_table_schema = 3;
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required .TableSchema modified_table_schema = 3;</code>
*/
- java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
- getRegionInfoList();
+ boolean hasModifiedTableSchema();
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required .TableSchema modified_table_schema = 3;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema();
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required .TableSchema modified_table_schema = 3;</code>
*/
- int getRegionInfoCount();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder();
+
+ // required bool delete_column_family_in_modify = 4;
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required bool delete_column_family_in_modify = 4;</code>
*/
- java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
- getRegionInfoOrBuilderList();
+ boolean hasDeleteColumnFamilyInModify();
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required bool delete_column_family_in_modify = 4;</code>
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
- int index);
+ boolean getDeleteColumnFamilyInModify();
}
/**
- * Protobuf type {@code DeleteTableStateData}
+ * Protobuf type {@code ModifyTableStateData}
*/
- public static final class DeleteTableStateData extends
+ public static final class ModifyTableStateData extends
com.google.protobuf.GeneratedMessage
- implements DeleteTableStateDataOrBuilder {
- // Use DeleteTableStateData.newBuilder() to construct.
- private DeleteTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ implements ModifyTableStateDataOrBuilder {
+ // Use ModifyTableStateData.newBuilder() to construct.
+ private ModifyTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
- private DeleteTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+ private ModifyTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
- private static final DeleteTableStateData defaultInstance;
- public static DeleteTableStateData getDefaultInstance() {
+ private static final ModifyTableStateData defaultInstance;
+ public static ModifyTableStateData getDefaultInstance() {
return defaultInstance;
}
- public DeleteTableStateData getDefaultInstanceForType() {
+ public ModifyTableStateData getDefaultInstanceForType() {
return defaultInstance;
}
@@ -1486,7 +1612,7 @@ public final class MasterProcedureProtos {
getUnknownFields() {
return this.unknownFields;
}
- private DeleteTableStateData(
+ private ModifyTableStateData(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -1523,24 +1649,34 @@ public final class MasterProcedureProtos {
break;
}
case 18: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- subBuilder = tableName_.toBuilder();
+ subBuilder = unmodifiedTableSchema_.toBuilder();
}
- tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
if (subBuilder != null) {
- subBuilder.mergeFrom(tableName_);
- tableName_ = subBuilder.buildPartial();
+ subBuilder.mergeFrom(unmodifiedTableSchema_);
+ unmodifiedTableSchema_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
- if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
- mutable_bitField0_ |= 0x00000004;
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = modifiedTableSchema_.toBuilder();
}
- regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ modifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(modifiedTableSchema_);
+ modifiedTableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ deleteColumnFamilyInModify_ = input.readBool();
break;
}
}
@@ -1551,37 +1687,34 @@ public final class MasterProcedureProtos {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
- if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
- }
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.Builder.class);
}
- public static com.google.protobuf.Parser<DeleteTableStateData> PARSER =
- new com.google.protobuf.AbstractParser<DeleteTableStateData>() {
- public DeleteTableStateData parsePartialFrom(
+ public static com.google.protobuf.Parser<ModifyTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<ModifyTableStateData>() {
+ public ModifyTableStateData parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return new DeleteTableStateData(input, extensionRegistry);
+ return new ModifyTableStateData(input, extensionRegistry);
}
};
@java.lang.Override
- public com.google.protobuf.Parser<DeleteTableStateData> getParserForType() {
+ public com.google.protobuf.Parser<ModifyTableStateData> getParserForType() {
return PARSER;
}
@@ -1608,68 +1741,71 @@ public final class MasterProcedureProtos {
return userInfo_;
}
- // required .TableName table_name = 2;
- public static final int TABLE_NAME_FIELD_NUMBER = 2;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ // optional .TableSchema unmodified_table_schema = 2;
+ public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_;
/**
- * <code>required .TableName table_name = 2;</code>
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
*/
- public boolean hasTableName() {
+ public boolean hasUnmodifiedTableSchema() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * <code>required .TableName table_name = 2;</code>
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
- return tableName_;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ return unmodifiedTableSchema_;
}
/**
- * <code>required .TableName table_name = 2;</code>
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
- return tableName_;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ return unmodifiedTableSchema_;
}
- // repeated .RegionInfo region_info = 3;
- public static final int REGION_INFO_FIELD_NUMBER = 3;
- private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ // required .TableSchema modified_table_schema = 3;
+ public static final int MODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_;
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required .TableSchema modified_table_schema = 3;</code>
*/
- public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
- return regionInfo_;
+ public boolean hasModifiedTableSchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required .TableSchema modified_table_schema = 3;</code>
*/
- public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
- getRegionInfoOrBuilderList() {
- return regionInfo_;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() {
+ return modifiedTableSchema_;
}
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required .TableSchema modified_table_schema = 3;</code>
*/
- public int getRegionInfoCount() {
- return regionInfo_.size();
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() {
+ return modifiedTableSchema_;
}
+
+ // required bool delete_column_family_in_modify = 4;
+ public static final int DELETE_COLUMN_FAMILY_IN_MODIFY_FIELD_NUMBER = 4;
+ private boolean deleteColumnFamilyInModify_;
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required bool delete_column_family_in_modify = 4;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
- return regionInfo_.get(index);
+ public boolean hasDeleteColumnFamilyInModify() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
- * <code>repeated .RegionInfo region_info = 3;</code>
+ * <code>required bool delete_column_family_in_modify = 4;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
- int index) {
- return regionInfo_.get(index);
+ public boolean getDeleteColumnFamilyInModify() {
+ return deleteColumnFamilyInModify_;
}
private void initFields() {
userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
- regionInfo_ = java.util.Collections.emptyList();
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ deleteColumnFamilyInModify_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -1680,24 +1816,28 @@ public final class MasterProcedureProtos {
memoizedIsInitialized = 0;
return false;
}
- if (!hasTableName()) {
+ if (!hasModifiedTableSchema()) {
memoizedIsInitialized = 0;
return false;
}
- if (!getUserInfo().isInitialized()) {
+ if (!hasDeleteColumnFamilyInModify()) {
memoizedIsInitialized = 0;
return false;
}
- if (!getTableName().isInitialized()) {
+ if (!getUserInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
- for (int i = 0; i < getRegionInfoCount(); i++) {
- if (!getRegionInfo(i).isInitialized()) {
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
+ if (!getModifiedTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
memoizedIsInitialized = 1;
return true;
}
@@ -1709,10 +1849,13 @@ public final class MasterProcedureProtos {
output.writeMessage(1, userInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeMessage(2, tableName_);
+ output.writeMessage(2, unmodifiedTableSchema_);
}
- for (int i = 0; i < regionInfo_.size(); i++) {
- output.writeMessage(3, regionInfo_.get(i));
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, modifiedTableSchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBool(4, deleteColumnFamilyInModify_);
}
getUnknownFields().writeTo(output);
}
@@ -1729,11 +1872,15 @@ public final class MasterProcedureProtos {
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, tableName_);
+ .computeMessageSize(2, unmodifiedTableSchema_);
}
- for (int i = 0; i < regionInfo_.size(); i++) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, regionInfo_.get(i));
+ .computeMessageSize(3, modifiedTableSchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(4, deleteColumnFamilyInModify_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -1752,10 +1899,10 @@ public final class MasterProcedureProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)) {
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData)) {
return super.equals(obj);
}
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) obj;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData) obj;
boolean result = true;
result = result && (hasUserInfo() == other.hasUserInfo());
@@ -1763,13 +1910,21 @@ public final class MasterProcedureProtos {
result = result && getUserInfo()
.equals(other.getUserInfo());
}
- result = result && (hasTableName() == other.hasTableName());
- if (hasTableName()) {
- result = result && getTableName()
- .equals(other.getTableName());
+ result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema());
+ if (hasUnmodifiedTableSchema()) {
+ result = result && getUnmodifiedTableSchema()
+ .equals(other.getUnmodifiedTableSchema());
+ }
+ result = result && (hasModifiedTableSchema() == other.hasModifiedTableSchema());
+ if (hasModifiedTableSchema()) {
+ result = result && getModifiedTableSchema()
+ .equals(other.getModifiedTableSchema());
+ }
+ result = result && (hasDeleteColumnFamilyInModify() == other.hasDeleteColumnFamilyInModify());
+ if (hasDeleteColumnFamilyInModify()) {
+ result = result && (getDeleteColumnFamilyInModify()
+ == other.getDeleteColumnFamilyInModify());
}
- result = result && getRegionInfoList()
- .equals(other.getRegionInfoList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -1787,66 +1942,70 @@ public final class MasterProcedureProtos {
hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
hash = (53 * hash) + getUserInfo().hashCode();
}
- if (hasTableName()) {
- hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
- hash = (53 * hash) + getTableName().hashCode();
+ if (hasUnmodifiedTableSchema()) {
+ hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getUnmodifiedTableSchema().hashCode();
}
- if (getRegionInfoCount() > 0) {
- hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
- hash = (53 * hash) + getRegionInfoList().hashCode();
+ if (hasModifiedTableSchema()) {
+ hash = (37 * hash) + MODIFIED_TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getModifiedTableSchema().hashCode();
+ }
+ if (hasDeleteColumnFamilyInModify()) {
+ hash = (37 * hash) + DELETE_COLUMN_FAMILY_IN_MODIFY_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getDeleteColumnFamilyInModify());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(byte[] data)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -1855,7 +2014,7 @@ public final class MasterProcedureProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -1867,24 +2026,24 @@ public final class MasterProcedureProtos {
return builder;
}
/**
- * Protobuf type {@code DeleteTableStateData}
+ * Protobuf type {@code ModifyTableStateData}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateDataOrBuilder {
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateDataOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.Builder.class);
}
- // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.newBuilder()
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -1897,8 +2056,8 @@ public final class MasterProcedureProtos {
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getUserInfoFieldBuilder();
- getTableNameFieldBuilder();
- getRegionInfoFieldBuilder();
+ getUnmodifiedTableSchemaFieldBuilder();
+ getModifiedTableSchemaFieldBuilder();
}
}
private static Builder create() {
@@ -1913,18 +2072,20 @@ public final class MasterProcedureProtos {
userInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
- if (tableNameBuilder_ == null) {
- tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
} else {
- tableNameBuilder_.clear();
+ unmodifiedTableSchemaBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
- if (regionInfoBuilder_ == null) {
- regionInfo_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
} else {
- regionInfoBuilder_.clear();
+ modifiedTableSchemaBuilder_.clear();
}
+ bitField0_ = (bitField0_ & ~0x00000004);
+ deleteColumnFamilyInModify_ = false;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@@ -1934,23 +2095,23 @@ public final class MasterProcedureProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_descriptor;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.getDefaultInstance();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.getDefaultInstance();
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData build() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = buildPartial();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData(this);
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -1964,28 +2125,1083 @@ public final class MasterProcedureProtos {
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- if (tableNameBuilder_ == null) {
- result.tableName_ = tableName_;
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ result.unmodifiedTableSchema_ = unmodifiedTableSchema_;
} else {
- result.tableName_ = tableNameBuilder_.build();
+ result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build();
}
- if (regionInfoBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
- bitField0_ = (bitField0_ & ~0x00000004);
- }
- result.regionInfo_ = regionInfo_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (modifiedTableSchemaBuilder_ == null) {
+ result.modifiedTableSchema_ = modifiedTableSchema_;
} else {
- result.regionInfo_ = regionInfoBuilder_.build();
+ result.modifiedTableSchema_ = modifiedTableSchemaBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
}
+ result.deleteColumnFamilyInModify_ = deleteColumnFamilyInModify_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)other);
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasUnmodifiedTableSchema()) {
+ mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema());
+ }
+ if (other.hasModifiedTableSchema()) {
+ mergeModifiedTableSchema(other.getModifiedTableSchema());
+ }
+ if (other.hasDeleteColumnFamilyInModify()) {
+ setDeleteColumnFamilyInModify(other.getDeleteColumnFamilyInModify());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasModifiedTableSchema()) {
+
+ return false;
+ }
+ if (!hasDeleteColumnFamilyInModify()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
+
+ return false;
+ }
+ }
+ if (!getModifiedTableSchema().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // optional .TableSchema unmodified_table_schema = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_;
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public boolean hasUnmodifiedTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ return unmodifiedTableSchema_;
+ } else {
+ return unmodifiedTableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ unmodifiedTableSchema_ = value;
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public Builder setUnmodifiedTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ unmodifiedTableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ unmodifiedTableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public Builder clearUnmodifiedTableSchema() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getUnmodifiedTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ if (unmodifiedTableSchemaBuilder_ != null) {
+ return unmodifiedTableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return unmodifiedTableSchema_;
+ }
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getUnmodifiedTableSchemaFieldBuilder() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ unmodifiedTableSchema_,
+ getParentForChildren(),
+ isClean());
+ unmodifiedTableSchema_ = null;
+ }
+ return unmodifiedTableSchemaBuilder_;
+ }
+
+ // required .TableSchema modified_table_schema = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> modifiedTableSchemaBuilder_;
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public boolean hasModifiedTableSchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ return modifiedTableSchema_;
+ } else {
+ return modifiedTableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder setModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ modifiedTableSchema_ = value;
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder setModifiedTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder mergeModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ modifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ modifiedTableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(modifiedTableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ modifiedTableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder clearModifiedTableSchema() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getModifiedTableSchemaBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getModifiedTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() {
+ if (modifiedTableSchemaBuilder_ != null) {
+ return modifiedTableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return modifiedTableSchema_;
+ }
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getModifiedTableSchemaFieldBuilder() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ modifiedTableSchema_,
+ getParentForChildren(),
+ isClean());
+ modifiedTableSchema_ = null;
+ }
+ return modifiedTableSchemaBuilder_;
+ }
+
+ // required bool delete_column_family_in_modify = 4;
+ private boolean deleteColumnFamilyInModify_ ;
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public boolean hasDeleteColumnFamilyInModify() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public boolean getDeleteColumnFamilyInModify() {
+ return deleteColumnFamilyInModify_;
+ }
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public Builder setDeleteColumnFamilyInModify(boolean value) {
+ bitField0_ |= 0x00000008;
+ deleteColumnFamilyInModify_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public Builder clearDeleteColumnFamilyInModify() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ deleteColumnFamilyInModify_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ModifyTableStateData)
+ }
+
+ static {
+ defaultInstance = new ModifyTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ModifyTableStateData)
+ }
+
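For readers skimming the generated boilerplate above, the essential surface of
ModifyTableStateData is small: a required UserInformation, an optional
unmodified TableSchema, a required modified TableSchema, and a required bool.
A hedged sketch of the builder round-trip, using only methods visible in this
diff plus standard protobuf accessors; it assumes UserInformation's required
effective_user field from RPC.proto, and the class name and field values are
placeholders:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;

public class ModifyTableStateDataRoundTrip {
  public static void main(String[] args) throws Exception {
    // user_info, modified_table_schema and delete_column_family_in_modify
    // are required, so build() would throw an uninitialized-message
    // exception if any of them were left unset.
    ModifyTableStateData state = ModifyTableStateData.newBuilder()
        .setUserInfo(UserInformation.newBuilder().setEffectiveUser("hbase").build())
        .setModifiedTableSchema(TableSchema.getDefaultInstance())
        .setDeleteColumnFamilyInModify(false)
        .build();

    // Serialize and parse back, as the procedure store would on recovery.
    ModifyTableStateData restored =
        ModifyTableStateData.parseFrom(state.toByteArray());
    System.out.println(restored.hasUnmodifiedTableSchema()); // false (optional field)
  }
}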
+ public interface DeleteTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // repeated .RegionInfo region_info = 3;
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ int getRegionInfoCount();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code DeleteTableStateData}
+ */
+ public static final class DeleteTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements DeleteTableStateDataOrBuilder {
+ // Use DeleteTableStateData.newBuilder() to construct.
+ private DeleteTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DeleteTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DeleteTableStateData defaultInstance;
+ public static DeleteTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DeleteTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DeleteTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DeleteTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<DeleteTableStateData>() {
+ public DeleteTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DeleteTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DeleteTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ public static final int REGION_INFO_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(3, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code DeleteTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)other);
} else {
super.mergeFrom(other);
return this;
@@ -2566,6 +3782,11 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_CreateTableStateData_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ModifyTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ModifyTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_DeleteTableStateData_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -2583,23 +3804,35 @@ public final class MasterProcedureProtos {
"C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
"_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" +
"chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" +
- "\030\003 \003(\0132\013.RegionInfo\"}\n\024DeleteTableStateD" +
- "ata\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation" +
- "\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022 \n\013regi" +
- "on_info\030\003 \003(\0132\013.RegionInfo*\330\001\n\020CreateTab" +
- "leState\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022" +
- " \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CRE",
- "ATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_" +
- "ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_" +
- "DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERAT" +
- "ION\020\006*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TAB" +
- "LE_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOV" +
- "E_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_L" +
- "AYOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACH" +
- "E\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037" +
- "\n\033DELETE_TABLE_POST_OPERATION\020\006BK\n*org.a" +
- "pache.hadoop.hbase.protobuf.generatedB\025M",
- "asterProcedureProtosH\001\210\001\001\240\001\001"
+ "\030\003 \003(\0132\013.RegionInfo\"\277\001\n\024ModifyTableState" +
+ "Data\022#\n\tuser_info\030\001 \002(\0132\020.UserInformatio" +
+ "n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
+ "leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
+ "\014.TableSchema\022&\n\036delete_column_family_in" +
+ "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
+ "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
+ "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
+ "o\030\003 \003(\0132\013.RegionInfo*\330\001\n\020CreateTableStat" +
+ "e\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034CRE" +
+ "ATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TA" +
+ "BLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN" +
+ "_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_C" +
+ "ACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020\006*" +
+ "\207\002\n\020ModifyTableState\022\030\n\024MODIFY_TABLE_PRE" +
+ "PARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(",
+ "\n$MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003" +
+ "\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN\020\004" +
+ "\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033M" +
+ "ODIFY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY_T" +
+ "ABLE_REOPEN_ALL_REGIONS\020\007*\337\001\n\020DeleteTabl" +
+ "eState\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!" +
+ "\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DEL" +
+ "ETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TA" +
+ "BLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_" +
+ "UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_",
+ "OPERATION\020\006BK\n*org.apache.hadoop.hbase.p" +
+ "rotobuf.generatedB\025MasterProcedureProtos" +
+ "H\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -2612,8 +3845,14 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CreateTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableSchema", "RegionInfo", });
- internal_static_DeleteTableStateData_descriptor =
+ internal_static_ModifyTableStateData_descriptor =
getDescriptor().getMessageTypes().get(1);
+ internal_static_ModifyTableStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ModifyTableStateData_descriptor,
+ new ja
<TRUNCATED>
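The generated class above follows the standard protobuf 2.x pattern: a static PARSER, a family of parseFrom overloads, and a nested Builder. A minimal round-trip sketch; the builder setters sit in the truncated part of this diff but are standard protobuf codegen, and setEffectiveUser mirrors UserInformation's effective_user field in RPC.proto (not shown here):

  MasterProcedureProtos.DeleteTableStateData data =
      MasterProcedureProtos.DeleteTableStateData.newBuilder()
          .setUserInfo(RPCProtos.UserInformation.newBuilder()
              .setEffectiveUser("hbase").build())               // required field 1
          .setTableName(HBaseProtos.TableName.newBuilder()
              .setNamespace(ByteString.copyFromUtf8("default"))
              .setQualifier(ByteString.copyFromUtf8("t1")).build()) // required field 2
          .build();                       // throws if a required field is unset
  byte[] bytes = data.toByteArray();
  MasterProcedureProtos.DeleteTableStateData copy =
      MasterProcedureProtos.DeleteTableStateData.parseFrom(bytes);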
[06/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core
framework
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
new file mode 100644
index 0000000..13f7bfa
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -0,0 +1,721 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
+import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
+
+/**
+ * WAL implementation of the ProcedureStore. Each insert/update/delete is
+ * serialized into a ByteSlot and handed to a single sync thread that batches
+ * slots into the current state log; once the tracker shows every procedure
+ * as updated or deleted, the older logs are removed.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class WALProcedureStore implements ProcedureStore {
+ private static final Log LOG = LogFactory.getLog(WALProcedureStore.class);
+
+ public interface LeaseRecovery {
+ void recoverFileLease(FileSystem fs, Path path) throws IOException;
+ }
+
+ private static final int MAX_RETRIES_BEFORE_ABORT = 3;
+
+ private static final String SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec";
+ private static final int DEFAULT_SYNC_WAIT_MSEC = 100;
+
+ private final CopyOnWriteArrayList<ProcedureStoreListener> listeners =
+ new CopyOnWriteArrayList<ProcedureStoreListener>();
+
+ private final LinkedList<ProcedureWALFile> logs = new LinkedList<ProcedureWALFile>();
+ private final ProcedureStoreTracker storeTracker = new ProcedureStoreTracker();
+ private final AtomicBoolean running = new AtomicBoolean(false);
+ private final ReentrantLock lock = new ReentrantLock();
+ private final Condition waitCond = lock.newCondition();
+ private final Condition slotCond = lock.newCondition();
+ private final Condition syncCond = lock.newCondition();
+
+ private final LeaseRecovery leaseRecovery;
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final Path logDir;
+
+ private AtomicBoolean inSync = new AtomicBoolean(false);
+ private ArrayBlockingQueue<ByteSlot> slotsCache = null;
+ private Set<ProcedureWALFile> corruptedLogs = null;
+ private FSDataOutputStream stream = null;
+ private long totalSynced = 0;
+ private long flushLogId = 0;
+ private int slotIndex = 0;
+ private Thread syncThread;
+ private ByteSlot[] slots;
+ private int syncWaitMsec;
+
+ public WALProcedureStore(final Configuration conf, final FileSystem fs, final Path logDir,
+ final LeaseRecovery leaseRecovery) {
+ this.fs = fs;
+ this.conf = conf;
+ this.logDir = logDir;
+ this.leaseRecovery = leaseRecovery;
+ }
+
+ @Override
+ public void start(int numSlots) throws IOException {
+ if (running.getAndSet(true)) {
+ return;
+ }
+
+ // Init buffer slots
+ slots = new ByteSlot[numSlots];
+    slotsCache = new ArrayBlockingQueue<ByteSlot>(numSlots, true);
+ while (slotsCache.remainingCapacity() > 0) {
+ slotsCache.offer(new ByteSlot());
+ }
+
+ // Tunings
+ syncWaitMsec = conf.getInt(SYNC_WAIT_MSEC_CONF_KEY, DEFAULT_SYNC_WAIT_MSEC);
+
+ // Init sync thread
+ syncThread = new Thread("WALProcedureStoreSyncThread") {
+ @Override
+ public void run() {
+ while (running.get()) {
+ try {
+ syncLoop();
+ } catch (IOException e) {
+ LOG.error("got an exception from the sync-loop", e);
+ sendAbortProcessSignal();
+ }
+ }
+ }
+ };
+ syncThread.start();
+ }
+
+ @Override
+ public void stop(boolean abort) {
+ if (!running.getAndSet(false)) {
+ return;
+ }
+
+ LOG.info("Stopping the WAL Procedure Store");
+ if (lock.tryLock()) {
+ try {
+ waitCond.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ if (!abort) {
+ try {
+ syncThread.join();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ // Close the writer
+ closeStream();
+
+ // Close the old logs
+ // they should be already closed, this is just in case the load fails
+ // and we call start() and then stop()
+ for (ProcedureWALFile log: logs) {
+ log.close();
+ }
+ logs.clear();
+ }
+
+ @Override
+ public boolean isRunning() {
+ return running.get();
+ }
+
+ @Override
+ public int getNumThreads() {
+ return slots == null ? 0 : slots.length;
+ }
+
+ public ProcedureStoreTracker getStoreTracker() {
+ return storeTracker;
+ }
+
+ @Override
+ public void registerListener(ProcedureStoreListener listener) {
+ this.listeners.add(listener);
+ }
+
+ @Override
+ public boolean unregisterListener(ProcedureStoreListener listener) {
+ return this.listeners.remove(listener);
+ }
+
+ @Override
+ public void recoverLease() throws IOException {
+ LOG.info("Starting WAL Procedure Store lease recovery");
+ FileStatus[] oldLogs = getLogFiles();
+ while (running.get()) {
+ // Get Log-MaxID and recover lease on old logs
+ flushLogId = initOldLogs(oldLogs) + 1;
+
+ // Create new state-log
+ if (!rollWriter(flushLogId)) {
+ // someone else has already created this log
+ LOG.debug("someone else has already created log " + flushLogId);
+ continue;
+ }
+
+ // We have the lease on the log
+ oldLogs = getLogFiles();
+ if (getMaxLogId(oldLogs) > flushLogId) {
+ // Someone else created new logs
+ LOG.debug("someone else created new logs. expected maxLogId < " + flushLogId);
+ logs.getLast().removeFile();
+ continue;
+ }
+
+ LOG.info("lease acquired flushLogId=" + flushLogId);
+ break;
+ }
+ }
+
+ @Override
+ public Iterator<Procedure> load() throws IOException {
+ if (logs.isEmpty()) {
+ throw new RuntimeException("recoverLease() must be called before loading data");
+ }
+
+    // Nothing to do if we only have the current log.
+ if (logs.size() == 1) {
+ LOG.debug("No state logs to replay");
+ return null;
+ }
+
+ // Load the old logs
+ final ArrayList<ProcedureWALFile> toRemove = new ArrayList<ProcedureWALFile>();
+ Iterator<ProcedureWALFile> it = logs.descendingIterator();
+ it.next(); // Skip the current log
+ try {
+ return ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
+ @Override
+ public void removeLog(ProcedureWALFile log) {
+ toRemove.add(log);
+ }
+
+ @Override
+ public void markCorruptedWAL(ProcedureWALFile log, IOException e) {
+ if (corruptedLogs == null) {
+ corruptedLogs = new HashSet<ProcedureWALFile>();
+ }
+ corruptedLogs.add(log);
+ // TODO: sideline corrupted log
+ }
+ });
+ } finally {
+ if (!toRemove.isEmpty()) {
+ for (ProcedureWALFile log: toRemove) {
+ removeLogFile(log);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void insert(final Procedure proc, final Procedure[] subprocs) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("insert " + proc + " subproc=" + Arrays.toString(subprocs));
+ }
+
+ ByteSlot slot = acquireSlot();
+ long logId = -1;
+ try {
+ // Serialize the insert
+ if (subprocs != null) {
+ ProcedureWALFormat.writeInsert(slot, proc, subprocs);
+ } else {
+ assert !proc.hasParent();
+ ProcedureWALFormat.writeInsert(slot, proc);
+ }
+
+ // Push the transaction data and wait until it is persisted
+ logId = pushData(slot);
+ } catch (IOException e) {
+      // We are not able to serialize the procedure: this is a code error,
+      // and we cannot go on.
+ LOG.fatal("Unable to serialize one of the procedure: proc=" + proc +
+ " subprocs=" + Arrays.toString(subprocs), e);
+ throw new RuntimeException(e);
+ } finally {
+ releaseSlot(slot);
+ }
+
+ // Update the store tracker
+ synchronized (storeTracker) {
+ if (logId == flushLogId) {
+ storeTracker.insert(proc, subprocs);
+ }
+ }
+ }
+
+ @Override
+ public void update(final Procedure proc) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("update " + proc);
+ }
+
+ ByteSlot slot = acquireSlot();
+ long logId = -1;
+ try {
+ // Serialize the update
+ ProcedureWALFormat.writeUpdate(slot, proc);
+
+ // Push the transaction data and wait until it is persisted
+ logId = pushData(slot);
+ } catch (IOException e) {
+      // We are not able to serialize the procedure: this is a code error,
+      // and we cannot go on.
+ LOG.fatal("Unable to serialize the procedure: " + proc, e);
+ throw new RuntimeException(e);
+ } finally {
+ releaseSlot(slot);
+ }
+
+ // Update the store tracker
+ boolean removeOldLogs = false;
+ synchronized (storeTracker) {
+ if (logId == flushLogId) {
+ storeTracker.update(proc);
+ removeOldLogs = storeTracker.isUpdated();
+ }
+ }
+
+ if (removeOldLogs) {
+ removeAllLogs(logId - 1);
+ }
+ }
+
+ @Override
+ public void delete(final long procId) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("delete " + procId);
+ }
+
+ ByteSlot slot = acquireSlot();
+ long logId = -1;
+ try {
+ // Serialize the delete
+ ProcedureWALFormat.writeDelete(slot, procId);
+
+ // Push the transaction data and wait until it is persisted
+ logId = pushData(slot);
+ } catch (IOException e) {
+      // We are not able to serialize the procedure: this is a code error,
+      // and we cannot go on.
+ LOG.fatal("Unable to serialize the procedure: " + procId, e);
+ throw new RuntimeException(e);
+ } finally {
+ releaseSlot(slot);
+ }
+
+ boolean removeOldLogs = false;
+ synchronized (storeTracker) {
+ if (logId == flushLogId) {
+ storeTracker.delete(procId);
+ if (storeTracker.isEmpty()) {
+ removeOldLogs = rollWriterOrDie(logId + 1);
+ }
+ }
+ }
+
+ if (removeOldLogs) {
+ removeAllLogs(logId);
+ }
+ }
+
+ private ByteSlot acquireSlot() {
+ ByteSlot slot = slotsCache.poll();
+ return slot != null ? slot : new ByteSlot();
+ }
+
+ private void releaseSlot(final ByteSlot slot) {
+ slot.reset();
+ slotsCache.offer(slot);
+ }
+
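+  // Hand the serialized slot to the sync thread and block until it is
+  // persisted; returns the id of the log the data was flushed to.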
+ private long pushData(final ByteSlot slot) {
+ assert !logs.isEmpty() : "recoverLease() must be called before inserting data";
+ long logId = -1;
+
+ lock.lock();
+ try {
+ // Wait for the sync to be completed
+ while (true) {
+ if (inSync.get()) {
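+          // A sync is in flight: wait for it to complete before taking a slot.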
+ syncCond.await();
+ } else if (slotIndex == slots.length) {
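+          // All slots are taken: wake the sync thread and wait for room.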
+ slotCond.signal();
+ syncCond.await();
+ } else {
+ break;
+ }
+ }
+
+ slots[slotIndex++] = slot;
+ logId = flushLogId;
+
+ // Notify that there is new data
+ if (slotIndex == 1) {
+ waitCond.signal();
+ }
+
+ // Notify that the slots are full
+ if (slotIndex == slots.length) {
+ slotCond.signal();
+ }
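+      // Block until the sync thread reports this slot as persisted.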
+ syncCond.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ sendAbortProcessSignal();
+ } finally {
+ lock.unlock();
+ }
+ return logId;
+ }
+
+ private void syncLoop() throws IOException {
+ inSync.set(false);
+ while (running.get()) {
+ lock.lock();
+ try {
+ // Wait until new data is available
+ if (slotIndex == 0) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Waiting for data. flushed=" + StringUtils.humanSize(totalSynced));
+ }
+ waitCond.await();
+ if (slotIndex == 0) {
+ // no data.. probably a stop()
+ continue;
+ }
+ }
+
+        // Wait up to syncWaitMsec, or until the "slots full" signal, before flushing
+ slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS);
+
+ inSync.set(true);
+ totalSynced += syncSlots();
+ slotIndex = 0;
+ inSync.set(false);
+ syncCond.signalAll();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ sendAbortProcessSignal();
+ } finally {
+ lock.unlock();
+ }
+ }
+ }
+
+ private long syncSlots() {
+ int retry = 0;
+ long totalSynced = 0;
+ do {
+ try {
+ totalSynced = syncSlots(stream, slots, 0, slotIndex);
+ break;
+ } catch (Throwable e) {
+ if (++retry == MAX_RETRIES_BEFORE_ABORT) {
+ LOG.error("sync slot failed, abort.", e);
+ sendAbortProcessSignal();
+ }
+ }
+ } while (running.get());
+ return totalSynced;
+ }
+
+ protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count)
+ throws IOException {
+ long totalSynced = 0;
+ for (int i = 0; i < count; ++i) {
+ ByteSlot data = slots[offset + i];
+ data.writeTo(stream);
+ totalSynced += data.size();
+ }
+ stream.hsync();
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Sync slots=" + count + '/' + slots.length +
+ " flushed=" + StringUtils.humanSize(totalSynced));
+ }
+ return totalSynced;
+ }
+
+ private void sendAbortProcessSignal() {
+ if (!this.listeners.isEmpty()) {
+ for (ProcedureStoreListener listener : this.listeners) {
+ listener.abortProcess();
+ }
+ }
+ }
+
+ private boolean rollWriterOrDie(final long logId) {
+ try {
+ return rollWriter(logId);
+ } catch (IOException e) {
+ LOG.warn("Unable to roll the log", e);
+ sendAbortProcessSignal();
+ return false;
+ }
+ }
+
+ private boolean rollWriter(final long logId) throws IOException {
+ ProcedureWALHeader header = ProcedureWALHeader.newBuilder()
+ .setVersion(ProcedureWALFormat.HEADER_VERSION)
+ .setType(ProcedureWALFormat.LOG_TYPE_STREAM)
+ .setMinProcId(storeTracker.getMinProcId())
+ .setLogId(logId)
+ .build();
+
+ FSDataOutputStream newStream = null;
+ Path newLogFile = null;
+ long startPos = -1;
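+    // Creating the file with overwrite=false doubles as the lease check:
+    // a concurrent master creating the same logId fails with
+    // FileAlreadyExistsException and rollWriter() returns false.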
+ try {
+ newLogFile = getLogFilePath(logId);
+ newStream = fs.create(newLogFile, false);
+ ProcedureWALFormat.writeHeader(newStream, header);
+ startPos = newStream.getPos();
+ } catch (FileAlreadyExistsException e) {
+ LOG.error("Log file with id=" + logId + " already exists", e);
+ return false;
+ }
+ lock.lock();
+ try {
+ closeStream();
+ synchronized (storeTracker) {
+ storeTracker.resetUpdates();
+ }
+ stream = newStream;
+ flushLogId = logId;
+ totalSynced = 0;
+ logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos));
+ } finally {
+ lock.unlock();
+ }
+ LOG.info("Roll new state log: " + logId);
+ return true;
+ }
+
+ private void closeStream() {
+ try {
+ if (stream != null) {
+ try {
+ ProcedureWALFormat.writeTrailer(stream, storeTracker);
+ } catch (IOException e) {
+ LOG.warn("Unable to write the trailer: " + e.getMessage());
+ }
+ stream.close();
+ }
+ } catch (IOException e) {
+ LOG.error("Unable to close the stream", e);
+ } finally {
+ stream = null;
+ }
+ }
+
+ private void removeAllLogs(long lastLogId) {
+ LOG.info("Remove all state logs with ID less then " + lastLogId);
+ while (!logs.isEmpty()) {
+ ProcedureWALFile log = logs.getFirst();
+ if (lastLogId < log.getLogId()) {
+ break;
+ }
+
+ removeLogFile(log);
+ }
+ }
+
+ private boolean removeLogFile(final ProcedureWALFile log) {
+ try {
+ LOG.debug("remove log: " + log);
+ log.removeFile();
+ logs.remove(log);
+ } catch (IOException e) {
+ LOG.error("unable to remove log " + log, e);
+ return false;
+ }
+ return true;
+ }
+
+ public Set<ProcedureWALFile> getCorruptedLogs() {
+ return corruptedLogs;
+ }
+
+ // ==========================================================================
+ // FileSystem Log Files helpers
+ // ==========================================================================
+ public Path getLogDir() {
+ return this.logDir;
+ }
+
+ public FileSystem getFileSystem() {
+ return this.fs;
+ }
+
+ protected Path getLogFilePath(final long logId) throws IOException {
+ return new Path(logDir, String.format("state-%020d.log", logId));
+ }
+
+ private static long getLogIdFromName(final String name) {
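+    // Parse the numeric id out of "state-%020d.log", skipping leading zeros.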
+ int end = name.lastIndexOf(".log");
+ int start = name.lastIndexOf('-') + 1;
+ while (start < end) {
+ if (name.charAt(start) != '0')
+ break;
+ start++;
+ }
+ return Long.parseLong(name.substring(start, end));
+ }
+
+ private FileStatus[] getLogFiles() throws IOException {
+ try {
+ return fs.listStatus(logDir, new PathFilter() {
+ @Override
+ public boolean accept(Path path) {
+ String name = path.getName();
+ return name.startsWith("state-") && name.endsWith(".log");
+ }
+ });
+ } catch (FileNotFoundException e) {
+ LOG.warn("log directory not found: " + e.getMessage());
+ return null;
+ }
+ }
+
+ private long getMaxLogId(final FileStatus[] logFiles) {
+ long maxLogId = 0;
+ if (logFiles != null && logFiles.length > 0) {
+ for (int i = 0; i < logFiles.length; ++i) {
+ maxLogId = Math.max(maxLogId, getLogIdFromName(logFiles[i].getPath().getName()));
+ }
+ }
+ return maxLogId;
+ }
+
+ /**
+ * @return Max-LogID of the specified log file set
+ */
+ private long initOldLogs(final FileStatus[] logFiles) throws IOException {
+ this.logs.clear();
+
+ long maxLogId = 0;
+ if (logFiles != null && logFiles.length > 0) {
+ for (int i = 0; i < logFiles.length; ++i) {
+ final Path logPath = logFiles[i].getPath();
+ leaseRecovery.recoverFileLease(fs, logPath);
+ maxLogId = Math.max(maxLogId, getLogIdFromName(logPath.getName()));
+
+ ProcedureWALFile log = initOldLog(logFiles[i]);
+ if (log != null) {
+ this.logs.add(log);
+ }
+ }
+ Collections.sort(this.logs);
+ initTrackerFromOldLogs();
+ }
+ return maxLogId;
+ }
+
+ private void initTrackerFromOldLogs() {
+ // TODO: Load the most recent tracker available
+ if (!logs.isEmpty()) {
+ ProcedureWALFile log = logs.getLast();
+ try {
+ log.readTracker(storeTracker);
+ } catch (IOException e) {
+ LOG.error("Unable to read tracker for " + log, e);
+ // try the next one...
+ storeTracker.clear();
+ storeTracker.setPartialFlag(true);
+ }
+ }
+ }
+
+ private ProcedureWALFile initOldLog(final FileStatus logFile) throws IOException {
+ ProcedureWALFile log = new ProcedureWALFile(fs, logFile);
+ if (logFile.getLen() == 0) {
+ LOG.warn("Remove uninitialized log " + logFile);
+ log.removeFile();
+ return null;
+ }
+
+ LOG.debug("opening state-log: " + logFile);
+ try {
+ log.open();
+ } catch (ProcedureWALFormat.InvalidWALDataException e) {
+ LOG.warn("Remove uninitialized log " + logFile, e);
+ log.removeFile();
+ return null;
+ } catch (IOException e) {
+ String msg = "Unable to read state log: " + logFile;
+ LOG.error(msg, e);
+ throw new IOException(msg, e);
+ }
+
+ if (log.isCompacted()) {
+ try {
+ log.readTrailer();
+ } catch (IOException e) {
+        // unfinished compacted log; throw it away
+ LOG.warn("Unfinished compacted log " + logFile, e);
+ log.removeFile();
+ return null;
+ }
+ }
+ return log;
+ }
+}
\ No newline at end of file
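Taken together, the lifecycle implied by the API above is: start the slots and sync thread, recover the lease, replay the old logs, then issue insert/update/delete. A hedged sketch, with exception handling omitted and a placeholder log directory:

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  WALProcedureStore store = new WALProcedureStore(conf, fs,
      new Path("/tmp/proc-wals"),                // hypothetical directory
      new WALProcedureStore.LeaseRecovery() {
        @Override
        public void recoverFileLease(FileSystem fs, Path path) {
          // no-op outside HDFS
        }
      });
  store.start(4);                                // four ByteSlot buffers
  store.recoverLease();                          // must precede load()/insert()
  Iterator<Procedure> replay = store.load();     // null if only the fresh log exists
  // ... insert(proc, subprocs), update(proc), delete(procId) ...
  store.stop(false);                             // graceful stop joins the sync thread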
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java
new file mode 100644
index 0000000..8904116
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.util;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Similar to ByteArrayOutputStream, with the exception that we can prepend a header,
+ * e.g. you write some data and then prepend a header that contains the data length or checksum.
+ * <code>
+ * ByteSlot slot = new ByteSlot();
+ * // write data
+ * slot.write(...);
+ * slot.write(...);
+ * // write header with the size of the written data
+ * slot.markHead();
+ * slot.write(Bytes.toBytes(slot.size()));
+ * // flush to stream as [header, data]
+ * slot.writeTo(stream);
+ * </code>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ByteSlot extends OutputStream {
+ private static final int DOUBLE_GROW_LIMIT = 1 << 20;
+ private static final int GROW_ALIGN = 128;
+
+ private byte[] buf;
+ private int head;
+ private int size;
+
+ public void reset() {
+ head = 0;
+ size = 0;
+ }
+
+ public void markHead() {
+ head = size;
+ }
+
+ public int getHead() {
+ return head;
+ }
+
+ public int size() {
+ return size;
+ }
+
+ public byte[] getBuffer() {
+ return buf;
+ }
+
+ public void writeAt(int offset, int b) {
+ head = Math.min(head, offset);
+ buf[offset] = (byte)b;
+ }
+
+ public void write(int b) {
+ ensureCapacity(size + 1);
+ buf[size++] = (byte)b;
+ }
+
+ public void write(byte[] b, int off, int len) {
+ ensureCapacity(size + len);
+ System.arraycopy(b, off, buf, size, len);
+ size += len;
+ }
+
+ public void writeTo(final OutputStream stream) throws IOException {
+ if (head != 0) {
+ stream.write(buf, head, size - head);
+ stream.write(buf, 0, head);
+ } else {
+ stream.write(buf, 0, size);
+ }
+ }
+
+ private void ensureCapacity(int minCapacity) {
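+    // Round the request up to a GROW_ALIGN boundary; below, the buffer is
+    // doubled up to DOUBLE_GROW_LIMIT, beyond which it grows only as needed.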
+ minCapacity = (minCapacity + (GROW_ALIGN - 1)) & -GROW_ALIGN;
+ if (buf == null) {
+ buf = new byte[minCapacity];
+ } else if (minCapacity > buf.length) {
+ int newCapacity = buf.length << 1;
+ if (minCapacity > newCapacity || newCapacity > DOUBLE_GROW_LIMIT) {
+ newCapacity = minCapacity;
+ }
+ buf = Arrays.copyOf(buf, newCapacity);
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java
new file mode 100644
index 0000000..97134c2
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.util;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class StringUtils {
+ private StringUtils() {}
+
+ public static String humanTimeDiff(long timeDiff) {
+ StringBuilder buf = new StringBuilder();
+ long hours = timeDiff / (60*60*1000);
+ long rem = (timeDiff % (60*60*1000));
+ long minutes = rem / (60*1000);
+ rem = rem % (60*1000);
+ float seconds = rem / 1000.0f;
+
+ if (hours != 0){
+ buf.append(hours);
+ buf.append("hrs, ");
+ }
+ if (minutes != 0){
+ buf.append(minutes);
+ buf.append("mins, ");
+ }
+ if (hours > 0 || minutes > 0) {
+ buf.append(seconds);
+ buf.append("sec");
+ } else {
+ buf.append(String.format("%.4fsec", seconds));
+ }
+ return buf.toString();
+ }
+
+ public static String humanSize(double size) {
+ if (size >= (1L << 40)) return String.format("%.1fT", size / (1L << 40));
+ if (size >= (1L << 30)) return String.format("%.1fG", size / (1L << 30));
+ if (size >= (1L << 20)) return String.format("%.1fM", size / (1L << 20));
+ if (size >= (1L << 10)) return String.format("%.1fK", size / (1L << 10));
+ return String.format("%.0f", size);
+ }
+
+ public static boolean isEmpty(final String input) {
+ return input == null || input.length() == 0;
+ }
+
+ public static String buildString(final String... parts) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < parts.length; ++i) {
+ sb.append(parts[i]);
+ }
+ return sb.toString();
+ }
+
+ public static StringBuilder appendStrings(final StringBuilder sb, final String... parts) {
+ for (int i = 0; i < parts.length; ++i) {
+ sb.append(parts[i]);
+ }
+ return sb;
+ }
+}
\ No newline at end of file
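For reference, the output these helpers produce on a few inputs; the values follow directly from the arithmetic above:

  StringUtils.humanSize(3 * (1L << 20));   // "3.0M"
  StringUtils.humanSize(512);              // "512"
  StringUtils.humanTimeDiff(90 * 1000);    // "1mins, 30.0sec"
  StringUtils.humanTimeDiff(250);          // "0.2500sec"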
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
new file mode 100644
index 0000000..f710ef4
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.util;
+
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class TimeoutBlockingQueue<E> {
+ public static interface TimeoutRetriever<T> {
+ long getTimeout(T object);
+ TimeUnit getTimeUnit(T object);
+ }
+
+ private final ReentrantLock lock = new ReentrantLock();
+ private final Condition waitCond = lock.newCondition();
+ private final TimeoutRetriever<? super E> timeoutRetriever;
+
+ private E[] objects;
+ private int head = 0;
+ private int tail = 0;
+
+ public TimeoutBlockingQueue(TimeoutRetriever<? super E> timeoutRetriever) {
+ this(32, timeoutRetriever);
+ }
+
+ @SuppressWarnings("unchecked")
+ public TimeoutBlockingQueue(int capacity, TimeoutRetriever<? super E> timeoutRetriever) {
+ this.objects = (E[])new Object[capacity];
+ this.timeoutRetriever = timeoutRetriever;
+ }
+
+ public void dump() {
+ for (int i = 0; i < objects.length; ++i) {
+ if (i == head) {
+ System.out.print("[" + objects[i] + "] ");
+ } else if (i == tail) {
+ System.out.print("]" + objects[i] + "[ ");
+ } else {
+ System.out.print(objects[i] + " ");
+ }
+ }
+ System.out.println();
+ }
+
+ public void clear() {
+ lock.lock();
+ try {
+ if (head != tail) {
+ for (int i = head; i < tail; ++i) {
+ objects[i] = null;
+ }
+ head = 0;
+ tail = 0;
+ waitCond.signal();
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ public void add(E e) {
+ if (e == null) throw new NullPointerException();
+
+ lock.lock();
+ try {
+ addElement(e);
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
+ public E poll() {
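+    // Single-shot wait: returns the head element only if its timeout has
+    // already elapsed; returns null when woken early by add() or signalAll().
+    // Callers are expected to poll() in a loop.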
+ lock.lock();
+ try {
+ if (isEmpty()) {
+ waitCond.await();
+ return null;
+ }
+
+ E elem = objects[head];
+ long nanos = getNanosTimeout(elem);
+ nanos = waitCond.awaitNanos(nanos);
+ return nanos > 0 ? null : removeFirst();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return null;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ public int size() {
+ return tail - head;
+ }
+
+ public boolean isEmpty() {
+ return (tail - head) == 0;
+ }
+
+ public void signalAll() {
+ lock.lock();
+ try {
+ waitCond.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ private void addElement(E elem) {
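+    // objects[head..tail) stays sorted by timeout: grow the array, shift it
+    // down, or binary-search (upperBound) an insertion point as needed.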
+ int size = (tail - head);
+ if ((objects.length - size) == 0) {
+ int capacity = size + ((size < 64) ? (size + 2) : (size >> 1));
+ E[] newObjects = (E[])new Object[capacity];
+
+ if (compareTimeouts(objects[tail - 1], elem) <= 0) {
+ // Append
+ System.arraycopy(objects, head, newObjects, 0, tail);
+ tail -= head;
+ newObjects[tail++] = elem;
+ } else if (compareTimeouts(objects[head], elem) > 0) {
+ // Prepend
+ System.arraycopy(objects, head, newObjects, 1, tail);
+ newObjects[0] = elem;
+ tail -= (head - 1);
+ } else {
+ // Insert in the middle
+ int index = upperBound(head, tail - 1, elem);
+ int newIndex = (index - head);
+ System.arraycopy(objects, head, newObjects, 0, newIndex);
+ newObjects[newIndex] = elem;
+ System.arraycopy(objects, index, newObjects, newIndex + 1, tail - index);
+ tail -= (head - 1);
+ }
+ head = 0;
+ objects = newObjects;
+ } else {
+ if (tail == objects.length) {
+ // shift down |-----AAAAAAA|
+ tail -= head;
+ System.arraycopy(objects, head, objects, 0, tail);
+ head = 0;
+ }
+
+ if (tail == head || compareTimeouts(objects[tail - 1], elem) <= 0) {
+ // Append
+ objects[tail++] = elem;
+ } else if (head > 0 && compareTimeouts(objects[head], elem) > 0) {
+ // Prepend
+ objects[--head] = elem;
+ } else {
+ // Insert in the middle
+ int index = upperBound(head, tail - 1, elem);
+ System.arraycopy(objects, index, objects, index + 1, tail - index);
+ objects[index] = elem;
+ tail++;
+ }
+ }
+ }
+
+ private E removeFirst() {
+ E elem = objects[head];
+ objects[head] = null;
+ head = (head + 1) % objects.length;
+ if (head == 0) tail = 0;
+ return elem;
+ }
+
+ private int upperBound(int start, int end, E key) {
+ while (start < end) {
+ int mid = (start + end) >>> 1;
+ E mitem = objects[mid];
+ int cmp = compareTimeouts(mitem, key);
+ if (cmp > 0) {
+ end = mid;
+ } else {
+ start = mid + 1;
+ }
+ }
+ return start;
+ }
+
+ private int compareTimeouts(final E a, final E b) {
+ long t1 = getNanosTimeout(a);
+ long t2 = getNanosTimeout(b);
+ return (t1 < t2) ? -1 : (t1 > t2) ? 1 : 0;
+ }
+
+ private long getNanosTimeout(final E obj) {
+ TimeUnit unit = timeoutRetriever.getTimeUnit(obj);
+ long timeout = timeoutRetriever.getTimeout(obj);
+ return unit.toNanos(timeout);
+ }
+}
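A hedged usage sketch of the queue's delay semantics; the Task type and its field are hypothetical:

  class Task {
    final long delayMsec;
    Task(long delayMsec) { this.delayMsec = delayMsec; }
  }

  TimeoutBlockingQueue<Task> queue = new TimeoutBlockingQueue<Task>(
      new TimeoutBlockingQueue.TimeoutRetriever<Task>() {
        @Override public long getTimeout(Task t) { return t.delayMsec; }
        @Override public TimeUnit getTimeUnit(Task t) { return TimeUnit.MILLISECONDS; }
      });
  queue.add(new Task(500));
  Task t;
  while ((t = queue.poll()) == null) {
    // null means we were signalled early or the head is not yet due
  }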
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
new file mode 100644
index 0000000..6e7306c
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class ProcedureTestingUtility {
+ private static final Log LOG = LogFactory.getLog(ProcedureTestingUtility.class);
+
+ private ProcedureTestingUtility() {
+ }
+
+ public static ProcedureStore createStore(final Configuration conf, final FileSystem fs,
+ final Path baseDir) throws IOException {
+ return createWalStore(conf, fs, baseDir);
+ }
+
+ public static WALProcedureStore createWalStore(final Configuration conf, final FileSystem fs,
+ final Path logDir) throws IOException {
+ return new WALProcedureStore(conf, fs, logDir, new WALProcedureStore.LeaseRecovery() {
+ @Override
+ public void recoverFileLease(FileSystem fs, Path path) throws IOException {
+ // no-op
+ }
+ });
+ }
+
+ public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor)
+ throws Exception {
+ restart(procExecutor, null);
+ }
+
+ public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor,
+ Runnable beforeStartAction) throws Exception {
+ ProcedureStore procStore = procExecutor.getStore();
+ int storeThreads = procExecutor.getNumThreads();
+ int execThreads = procExecutor.getNumThreads();
+ // stop
+ procExecutor.stop();
+ procStore.stop(false);
+ procExecutor.join();
+ // nothing running...
+ if (beforeStartAction != null) {
+ beforeStartAction.run();
+ }
+ // re-start
+ procStore.start(storeThreads);
+ procExecutor.start(execThreads);
+ }
+
+ public static <TEnv> void setKillBeforeStoreUpdate(ProcedureExecutor<TEnv> procExecutor,
+ boolean value) {
+ if (procExecutor.testing == null) {
+ procExecutor.testing = new ProcedureExecutor.Testing();
+ }
+ procExecutor.testing.killBeforeStoreUpdate = value;
+ LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate);
+ }
+
+ public static <TEnv> void setToggleKillBeforeStoreUpdate(ProcedureExecutor<TEnv> procExecutor,
+ boolean value) {
+ if (procExecutor.testing == null) {
+ procExecutor.testing = new ProcedureExecutor.Testing();
+ }
+ procExecutor.testing.toggleKillBeforeStoreUpdate = value;
+ }
+
+ public static <TEnv> void toggleKillBeforeStoreUpdate(ProcedureExecutor<TEnv> procExecutor) {
+ if (procExecutor.testing == null) {
+ procExecutor.testing = new ProcedureExecutor.Testing();
+ }
+ procExecutor.testing.killBeforeStoreUpdate = !procExecutor.testing.killBeforeStoreUpdate;
+ LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate);
+ }
+
+ public static <TEnv> void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor<TEnv> procExecutor,
+ boolean value) {
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, value);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, value);
+ }
+
+ public static <TEnv> long submitAndWait(ProcedureExecutor<TEnv> procExecutor, Procedure proc) {
+ long procId = procExecutor.submitProcedure(proc);
+ waitProcedure(procExecutor, procId);
+ return procId;
+ }
+
+ public static <TEnv> void waitProcedure(ProcedureExecutor<TEnv> procExecutor, long procId) {
+ while (!procExecutor.isFinished(procId) && procExecutor.isRunning()) {
+ Threads.sleepWithoutInterrupt(250);
+ }
+ }
+
+ public static <TEnv> void waitNoProcedureRunning(ProcedureExecutor<TEnv> procExecutor) {
+ int stableRuns = 0;
+ while (stableRuns < 10) {
+ if (procExecutor.getActiveExecutorCount() > 0 || procExecutor.getRunnableSet().size() > 0) {
+ stableRuns = 0;
+ Threads.sleepWithoutInterrupt(100);
+ } else {
+ stableRuns++;
+ Threads.sleepWithoutInterrupt(25);
+ }
+ }
+ }
+
+ public static <TEnv> void assertProcNotYetCompleted(ProcedureExecutor<TEnv> procExecutor,
+ long procId) {
+ assertFalse("expected a running proc", procExecutor.isFinished(procId));
+ assertEquals(null, procExecutor.getResult(procId));
+ }
+
+ public static <TEnv> void assertProcNotFailed(ProcedureExecutor<TEnv> procExecutor,
+ long procId) {
+ ProcedureResult result = procExecutor.getResult(procId);
+ assertTrue("expected procedure result", result != null);
+ assertProcNotFailed(result);
+ }
+
+ public static void assertProcNotFailed(final ProcedureResult result) {
+ Exception exception = result.getException();
+ String msg = exception != null ? exception.toString() : "no exception found";
+ assertFalse(msg, result.isFailed());
+ }
+
+ public static void assertIsAbortException(final ProcedureResult result) {
+ LOG.info(result.getException());
+    assertTrue(result.isFailed());
+    Throwable cause = result.getException().getCause();
+    assertTrue("expected abort exception, got " + cause,
+      cause instanceof ProcedureAbortedException);
+ }
+}
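For orientation, a minimal sketch of how a recovery test can drive these helpers. The fixture and MyProcedure are hypothetical; the pattern mirrors the recovery tests later in this series. With the kill flag set, the executor aborts itself before persisting each state update, and restart() replays the WAL:

    // hypothetical test body (throws Exception); procExecutor comes from the fixture
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true);
    long procId = ProcedureTestingUtility.submitAndWait(procExecutor, new MyProcedure());
    while (!procExecutor.isRunning()) {
      // the executor "crashed" before a store update: replay the WAL and resume
      ProcedureTestingUtility.restart(procExecutor);
      ProcedureTestingUtility.waitProcedure(procExecutor, procId);
    }
    ProcedureTestingUtility.assertProcNotFailed(procExecutor, procId);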
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java
new file mode 100644
index 0000000..7fe109e
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestProcedureExecution {
+ private static final Log LOG = LogFactory.getLog(TestProcedureExecution.class);
+
+ private static final int PROCEDURE_EXECUTOR_SLOTS = 1;
+ private static final Procedure NULL_PROC = null;
+
+ private ProcedureExecutor<Void> procExecutor;
+ private ProcedureStore procStore;
+
+ private HBaseCommonTestingUtility htu;
+ private FileSystem fs;
+ private Path testDir;
+ private Path logDir;
+
+ @Before
+ public void setUp() throws IOException {
+ htu = new HBaseCommonTestingUtility();
+ testDir = htu.getDataTestDir();
+ fs = testDir.getFileSystem(htu.getConfiguration());
+ assertTrue(testDir.depth() > 1);
+
+ logDir = new Path(testDir, "proc-logs");
+ procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
+    procExecutor = new ProcedureExecutor<Void>(htu.getConfiguration(), null, procStore);
+ procStore.start(PROCEDURE_EXECUTOR_SLOTS);
+ procExecutor.start(PROCEDURE_EXECUTOR_SLOTS);
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ procExecutor.stop();
+ procStore.stop(false);
+ fs.delete(logDir, true);
+ }
+
+ private static class TestProcedureException extends Exception {
+ public TestProcedureException(String msg) { super(msg); }
+ }
+
+ public static class TestSequentialProcedure extends SequentialProcedure<Void> {
+ private final Procedure[] subProcs;
+ private final List<String> state;
+ private final Exception failure;
+ private final String name;
+
+ public TestSequentialProcedure() {
+ throw new UnsupportedOperationException("recovery should not be triggered here");
+ }
+
+ public TestSequentialProcedure(String name, List<String> state, Procedure... subProcs) {
+ this.state = state;
+ this.subProcs = subProcs;
+ this.name = name;
+ this.failure = null;
+ }
+
+ public TestSequentialProcedure(String name, List<String> state, Exception failure) {
+ this.state = state;
+ this.subProcs = null;
+ this.name = name;
+ this.failure = failure;
+ }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ state.add(name + "-execute");
+ if (failure != null) {
+ setFailure(new RemoteProcedureException(name + "-failure", failure));
+ return null;
+ }
+ return subProcs;
+ }
+
+ @Override
+ protected void rollback(Void env) {
+ state.add(name + "-rollback");
+ }
+
+ @Override
+ protected boolean abort(Void env) {
+ state.add(name + "-abort");
+ return true;
+ }
+ }
+
+ @Test(timeout=30000)
+ public void testBadSubprocList() {
+ List<String> state = new ArrayList<String>();
+ Procedure subProc2 = new TestSequentialProcedure("subProc2", state);
+ Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2, NULL_PROC);
+ Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1);
+ long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc);
+
+ // subProc1 has a "null" subprocedure which is catched as InvalidArgument
+ // failed state with 2 execute and 2 rollback
+ LOG.info(state);
+ ProcedureResult result = procExecutor.getResult(rootId);
+ LOG.info(result.getException());
+ assertTrue(state.toString(), result.isFailed());
+ assertTrue(result.getException().toString(),
+ result.getException().getCause() instanceof IllegalArgumentException);
+
+ assertEquals(state.toString(), 4, state.size());
+ assertEquals("rootProc-execute", state.get(0));
+ assertEquals("subProc1-execute", state.get(1));
+ assertEquals("subProc1-rollback", state.get(2));
+ assertEquals("rootProc-rollback", state.get(3));
+ }
+
+ @Test(timeout=30000)
+ public void testSingleSequentialProc() {
+ List<String> state = new ArrayList<String>();
+ Procedure subProc2 = new TestSequentialProcedure("subProc2", state);
+ Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2);
+ Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1);
+ long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc);
+
+    // successful state, with 3 executes
+ LOG.info(state);
+ ProcedureResult result = procExecutor.getResult(rootId);
+ ProcedureTestingUtility.assertProcNotFailed(result);
+ assertEquals(state.toString(), 3, state.size());
+ }
+
+ @Test(timeout=30000)
+ public void testSingleSequentialProcRollback() {
+ List<String> state = new ArrayList<String>();
+ Procedure subProc2 = new TestSequentialProcedure("subProc2", state,
+ new TestProcedureException("fail test"));
+ Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2);
+ Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1);
+ long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc);
+
+    // the 3rd proc fails; rollback after 2 successful executions
+ LOG.info(state);
+ ProcedureResult result = procExecutor.getResult(rootId);
+ LOG.info(result.getException());
+ assertTrue(state.toString(), result.isFailed());
+ assertTrue(result.getException().toString(),
+ result.getException().getCause() instanceof TestProcedureException);
+
+ assertEquals(state.toString(), 6, state.size());
+ assertEquals("rootProc-execute", state.get(0));
+ assertEquals("subProc1-execute", state.get(1));
+ assertEquals("subProc2-execute", state.get(2));
+ assertEquals("subProc2-rollback", state.get(3));
+ assertEquals("subProc1-rollback", state.get(4));
+ assertEquals("rootProc-rollback", state.get(5));
+ }
+
+ public static class TestFaultyRollback extends SequentialProcedure<Void> {
+ private int retries = 0;
+
+ public TestFaultyRollback() { }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ setFailure("faulty-rollback-test", new TestProcedureException("test faulty rollback"));
+ return null;
+ }
+
+ @Override
+ protected void rollback(Void env) throws IOException {
+ if (++retries < 3) {
+ LOG.info("inject rollback failure " + retries);
+ throw new IOException("injected failure number " + retries);
+ }
+ LOG.info("execute non faulty rollback step retries=" + retries);
+ }
+
+ @Override
+ protected boolean abort(Void env) { return false; }
+ }
+
+ @Test(timeout=30000)
+ public void testRollbackRetriableFailure() {
+ long procId = ProcedureTestingUtility.submitAndWait(procExecutor, new TestFaultyRollback());
+
+ ProcedureResult result = procExecutor.getResult(procId);
+ LOG.info(result.getException());
+ assertTrue("expected a failure", result.isFailed());
+ assertTrue(result.getException().toString(),
+ result.getException().getCause() instanceof TestProcedureException);
+ }
+
+ public static class TestWaitingProcedure extends SequentialProcedure<Void> {
+ private final List<String> state;
+ private final boolean hasChild;
+ private final String name;
+
+ public TestWaitingProcedure() {
+ throw new UnsupportedOperationException("recovery should not be triggered here");
+ }
+
+ public TestWaitingProcedure(String name, List<String> state, boolean hasChild) {
+ this.hasChild = hasChild;
+ this.state = state;
+ this.name = name;
+ }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ state.add(name + "-execute");
+ setState(ProcedureState.WAITING_TIMEOUT);
+ return hasChild ? new Procedure[] { new TestWaitChild(name, state) } : null;
+ }
+
+ @Override
+ protected void rollback(Void env) {
+ state.add(name + "-rollback");
+ }
+
+ @Override
+ protected boolean abort(Void env) {
+ state.add(name + "-abort");
+ return true;
+ }
+
+ public static class TestWaitChild extends SequentialProcedure<Void> {
+ private final List<String> state;
+ private final String name;
+
+ public TestWaitChild() {
+ throw new UnsupportedOperationException("recovery should not be triggered here");
+ }
+
+ public TestWaitChild(String name, List<String> state) {
+ this.name = name;
+ this.state = state;
+ }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ state.add(name + "-child-execute");
+ return null;
+ }
+
+ @Override
+ protected void rollback(Void env) {
+ state.add(name + "-child-rollback");
+ }
+
+ @Override
+ protected boolean abort(Void env) {
+ state.add(name + "-child-abort");
+ return true;
+ }
+ }
+ }
+
+ @Test(timeout=30000)
+ public void testAbortTimeout() {
+ final int PROC_TIMEOUT_MSEC = 2500;
+ List<String> state = new ArrayList<String>();
+ Procedure proc = new TestWaitingProcedure("wproc", state, false);
+ proc.setTimeout(PROC_TIMEOUT_MSEC);
+ long startTime = EnvironmentEdgeManager.currentTime();
+ long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ long execTime = EnvironmentEdgeManager.currentTime() - startTime;
+ LOG.info(state);
+ assertTrue("we didn't wait enough execTime=" + execTime, execTime >= PROC_TIMEOUT_MSEC);
+ ProcedureResult result = procExecutor.getResult(rootId);
+ LOG.info(result.getException());
+ assertTrue(state.toString(), result.isFailed());
+ assertTrue(result.getException().toString(),
+ result.getException().getCause() instanceof TimeoutException);
+ assertEquals(state.toString(), 2, state.size());
+ assertEquals("wproc-execute", state.get(0));
+ assertEquals("wproc-rollback", state.get(1));
+ }
+
+ @Test(timeout=30000)
+ public void testAbortTimeoutWithChildren() {
+ List<String> state = new ArrayList<String>();
+ Procedure proc = new TestWaitingProcedure("wproc", state, true);
+ proc.setTimeout(2500);
+ long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ LOG.info(state);
+ ProcedureResult result = procExecutor.getResult(rootId);
+ LOG.info(result.getException());
+ assertTrue(state.toString(), result.isFailed());
+ assertTrue(result.getException().toString(),
+ result.getException().getCause() instanceof TimeoutException);
+ assertEquals(state.toString(), 4, state.size());
+ assertEquals("wproc-execute", state.get(0));
+ assertEquals("wproc-child-execute", state.get(1));
+ assertEquals("wproc-child-rollback", state.get(2));
+ assertEquals("wproc-rollback", state.get(3));
+ }
+}
\ No newline at end of file
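For reference, a minimal sketch of the contract these tests exercise, as a hypothetical no-op procedure (assuming the SequentialProcedure API used above); on failure, rollback runs child-first, which is exactly the ordering testSingleSequentialProcRollback asserts:

    public class NoopProcedure extends SequentialProcedure<Void> {
      @Override
      protected Procedure[] execute(Void env) {
        // do one unit of work; return sub-procedures to schedule next, or null when done
        return null;
      }

      @Override
      protected void rollback(Void env) {
        // undo this step's work; invoked in reverse (child-first) order on failure
      }

      @Override
      protected boolean abort(Void env) {
        return true; // accept the abort request
      }
    }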
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java
new file mode 100644
index 0000000..e36a295
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestProcedureFairRunQueues {
+ private static class TestRunQueue implements ProcedureFairRunQueues.FairObject {
+ private final int priority;
+ private final String name;
+
+ private boolean available = true;
+
+ public TestRunQueue(String name, int priority) {
+ this.name = name;
+ this.priority = priority;
+ }
+
+ @Override
+ public String toString() {
+ return name;
+ }
+
+ private void setAvailable(boolean available) {
+ this.available = available;
+ }
+
+ @Override
+ public boolean isAvailable() {
+ return available;
+ }
+
+ @Override
+ public int getPriority() {
+ return priority;
+ }
+ }
+
+ @Test
+ public void testEmptyFairQueues() throws Exception {
+ ProcedureFairRunQueues<String, TestRunQueue> fairq
+ = new ProcedureFairRunQueues<String, TestRunQueue>(1);
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(null, fairq.poll());
+ }
+ }
+
+ @Test
+ public void testFairQueues() throws Exception {
+ ProcedureFairRunQueues<String, TestRunQueue> fairq
+ = new ProcedureFairRunQueues<String, TestRunQueue>(1);
+ TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1));
+ TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1));
+ TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2));
+
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(a, fairq.poll());
+ assertEquals(b, fairq.poll());
+ assertEquals(m, fairq.poll());
+ assertEquals(m, fairq.poll());
+ }
+ }
+
+ @Test
+ public void testFairQueuesNotAvailable() throws Exception {
+ ProcedureFairRunQueues<String, TestRunQueue> fairq
+ = new ProcedureFairRunQueues<String, TestRunQueue>(1);
+ TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1));
+ TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1));
+ TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2));
+
+ // m is not available
+ m.setAvailable(false);
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(a, fairq.poll());
+ assertEquals(b, fairq.poll());
+ }
+
+ // m is available
+ m.setAvailable(true);
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(m, fairq.poll());
+ assertEquals(m, fairq.poll());
+ assertEquals(a, fairq.poll());
+ assertEquals(b, fairq.poll());
+ }
+
+ // b is not available
+ b.setAvailable(false);
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(m, fairq.poll());
+ assertEquals(m, fairq.poll());
+ assertEquals(a, fairq.poll());
+ }
+
+ assertEquals(m, fairq.poll());
+ m.setAvailable(false);
+ // m should be fetched next, but is no longer available
+ assertEquals(a, fairq.poll());
+ assertEquals(a, fairq.poll());
+ b.setAvailable(true);
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(b, fairq.poll());
+ assertEquals(a, fairq.poll());
+ }
+ }
+
+ @Test
+ public void testFairQueuesDelete() throws Exception {
+ ProcedureFairRunQueues<String, TestRunQueue> fairq
+ = new ProcedureFairRunQueues<String, TestRunQueue>(1);
+ TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1));
+ TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1));
+ TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2));
+
+ // Fetch A and then remove it
+ assertEquals(a, fairq.poll());
+ assertEquals(a, fairq.remove("A"));
+
+ // Fetch B and then remove it
+ assertEquals(b, fairq.poll());
+ assertEquals(b, fairq.remove("B"));
+
+ // Fetch M and then remove it
+ assertEquals(m, fairq.poll());
+ assertEquals(m, fairq.remove("M"));
+
+ // nothing left
+ assertEquals(null, fairq.poll());
+ }
+}
\ No newline at end of file
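The assertions above encode a weighted round-robin: a queue with priority N is polled N times before the scheduler moves on. A usage sketch of that ratio (a fragment for a test method, assuming the ProcedureFairRunQueues API exercised above):

    ProcedureFairRunQueues<String, TestRunQueue> fairq =
        new ProcedureFairRunQueues<String, TestRunQueue>(1);
    fairq.add("A", new TestRunQueue("A", 1));
    fairq.add("B", new TestRunQueue("B", 1));
    fairq.add("M", new TestRunQueue("M", 2));

    int picksA = 0, picksB = 0, picksM = 0;
    for (int i = 0; i < 12; ++i) {
      String name = fairq.poll().toString();
      if ("A".equals(name)) picksA++;
      else if ("B".equals(name)) picksB++;
      else picksM++;
    }
    // the poll cycle is A, B, M, M -> picksA == 3, picksB == 3, picksM == 6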
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
new file mode 100644
index 0000000..0b7395b
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
@@ -0,0 +1,488 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestProcedureRecovery {
+ private static final Log LOG = LogFactory.getLog(TestProcedureRecovery.class);
+
+ private static final int PROCEDURE_EXECUTOR_SLOTS = 1;
+ private static final Procedure NULL_PROC = null;
+
+ private static ProcedureExecutor<Void> procExecutor;
+ private static ProcedureStore procStore;
+ private static int procSleepInterval;
+
+ private HBaseCommonTestingUtility htu;
+ private FileSystem fs;
+ private Path testDir;
+ private Path logDir;
+
+ @Before
+ public void setUp() throws IOException {
+ htu = new HBaseCommonTestingUtility();
+ testDir = htu.getDataTestDir();
+ fs = testDir.getFileSystem(htu.getConfiguration());
+ assertTrue(testDir.depth() > 1);
+
+ logDir = new Path(testDir, "proc-logs");
+ procStore = ProcedureTestingUtility.createStore(htu.getConfiguration(), fs, logDir);
+    procExecutor = new ProcedureExecutor<Void>(htu.getConfiguration(), null, procStore);
+ procExecutor.testing = new ProcedureExecutor.Testing();
+ procStore.start(PROCEDURE_EXECUTOR_SLOTS);
+ procExecutor.start(PROCEDURE_EXECUTOR_SLOTS);
+ procSleepInterval = 0;
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ procExecutor.stop();
+ procStore.stop(false);
+ fs.delete(logDir, true);
+ }
+
+ private void restart() throws Exception {
+ dumpLogDirState();
+ ProcedureTestingUtility.restart(procExecutor);
+ dumpLogDirState();
+ }
+
+ public static class TestSingleStepProcedure extends SequentialProcedure<Void> {
+ private int step = 0;
+
+ public TestSingleStepProcedure() { }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ LOG.debug("execute procedure " + this + " step=" + step);
+ step++;
+ setResult(Bytes.toBytes(step));
+ return null;
+ }
+
+ @Override
+ protected void rollback(Void env) { }
+
+ @Override
+ protected boolean abort(Void env) { return true; }
+ }
+
+ public static class BaseTestStepProcedure extends SequentialProcedure<Void> {
+ private AtomicBoolean abort = new AtomicBoolean(false);
+ private int step = 0;
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ LOG.debug("execute procedure " + this + " step=" + step);
+ ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor);
+ step++;
+ Threads.sleepWithoutInterrupt(procSleepInterval);
+ if (isAborted()) {
+ setFailure(new RemoteProcedureException(getClass().getName(),
+ new ProcedureAbortedException(
+ "got an abort at " + getClass().getName() + " step=" + step)));
+ return null;
+ }
+ return null;
+ }
+
+ @Override
+ protected void rollback(Void env) {
+ LOG.debug("rollback procedure " + this + " step=" + step);
+ ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor);
+ step++;
+ }
+
+ @Override
+ protected boolean abort(Void env) {
+ abort.set(true);
+ return true;
+ }
+
+ private boolean isAborted() {
+ boolean aborted = abort.get();
+ BaseTestStepProcedure proc = this;
+ while (proc.hasParent() && !aborted) {
+ proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId());
+ aborted = proc.isAborted();
+ }
+ return aborted;
+ }
+ }
+
+ public static class TestMultiStepProcedure extends BaseTestStepProcedure {
+ public TestMultiStepProcedure() { }
+
+ @Override
+ public Procedure[] execute(Void env) {
+ super.execute(env);
+ return isFailed() ? null : new Procedure[] { new Step1Procedure() };
+ }
+
+ public static class Step1Procedure extends BaseTestStepProcedure {
+ public Step1Procedure() { }
+
+ @Override
+ protected Procedure[] execute(Void env) {
+ super.execute(env);
+ return isFailed() ? null : new Procedure[] { new Step2Procedure() };
+ }
+ }
+
+ public static class Step2Procedure extends BaseTestStepProcedure {
+ public Step2Procedure() { }
+ }
+ }
+
+ @Test
+ public void testNoopLoad() throws Exception {
+ restart();
+ }
+
+ @Test(timeout=30000)
+ public void testSingleStepProcRecovery() throws Exception {
+ Procedure proc = new TestSingleStepProcedure();
+ procExecutor.testing.killBeforeStoreUpdate = true;
+ long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ assertFalse(procExecutor.isRunning());
+ procExecutor.testing.killBeforeStoreUpdate = false;
+
+ // Restart and verify that the procedures restart
+ long restartTs = EnvironmentEdgeManager.currentTime();
+ restart();
+ waitProcedure(procId);
+ ProcedureResult result = procExecutor.getResult(procId);
+ assertTrue(result.getLastUpdate() > restartTs);
+ ProcedureTestingUtility.assertProcNotFailed(result);
+ assertEquals(1, Bytes.toInt(result.getResult()));
+ long resultTs = result.getLastUpdate();
+
+ // Verify that after another restart the result is still there
+ restart();
+ result = procExecutor.getResult(procId);
+ ProcedureTestingUtility.assertProcNotFailed(result);
+ assertEquals(resultTs, result.getLastUpdate());
+ assertEquals(1, Bytes.toInt(result.getResult()));
+ }
+
+ @Test(timeout=30000)
+ public void testMultiStepProcRecovery() throws Exception {
+ // Step 0 - kill
+ Procedure proc = new TestMultiStepProcedure();
+ long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 0 exec && Step 1 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 1 exec && step 2 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 2 exec
+ restart();
+ waitProcedure(procId);
+ assertTrue(procExecutor.isRunning());
+
+ // The procedure is completed
+ ProcedureResult result = procExecutor.getResult(procId);
+ ProcedureTestingUtility.assertProcNotFailed(result);
+ }
+
+ @Test(timeout=30000)
+ public void testMultiStepRollbackRecovery() throws Exception {
+ // Step 0 - kill
+ Procedure proc = new TestMultiStepProcedure();
+ long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 0 exec && Step 1 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 1 exec && step 2 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 2 exec - rollback - kill
+ procSleepInterval = 2500;
+ restart();
+ assertTrue(procExecutor.abort(procId));
+ waitProcedure(procId);
+ assertFalse(procExecutor.isRunning());
+
+ // rollback - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // rollback - complete
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Restart the executor and get the result
+ restart();
+ waitProcedure(procId);
+
+ // The procedure is completed
+ ProcedureResult result = procExecutor.getResult(procId);
+ ProcedureTestingUtility.assertIsAbortException(result);
+ }
+
+ public static class TestStateMachineProcedure
+ extends StateMachineProcedure<Void, TestStateMachineProcedure.State> {
+ enum State { STATE_1, STATE_2, STATE_3, DONE }
+
+ public TestStateMachineProcedure() {}
+
+ private AtomicBoolean aborted = new AtomicBoolean(false);
+ private int iResult = 0;
+
+ @Override
+ protected StateMachineProcedure.Flow executeFromState(Void env, State state) {
+ switch (state) {
+ case STATE_1:
+ LOG.info("execute step 1 " + this);
+ setNextState(State.STATE_2);
+ iResult += 3;
+ break;
+ case STATE_2:
+ LOG.info("execute step 2 " + this);
+ setNextState(State.STATE_3);
+ iResult += 5;
+ break;
+ case STATE_3:
+ LOG.info("execute step 3 " + this);
+ Threads.sleepWithoutInterrupt(procSleepInterval);
+ if (aborted.get()) {
+ LOG.info("aborted step 3 " + this);
+ setAbortFailure("test", "aborted");
+ break;
+ }
+ setNextState(State.DONE);
+ iResult += 7;
+ setResult(Bytes.toBytes(iResult));
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException();
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(Void env, final State state) {
+ switch (state) {
+ case STATE_1:
+ LOG.info("rollback step 1 " + this);
+ break;
+ case STATE_2:
+ LOG.info("rollback step 2 " + this);
+ break;
+ case STATE_3:
+ LOG.info("rollback step 3 " + this);
+ break;
+ default:
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ @Override
+ protected State getState(final int stateId) {
+ return State.values()[stateId];
+ }
+
+ @Override
+ protected int getStateId(final State state) {
+ return state.ordinal();
+ }
+
+ @Override
+ protected State getInitialState() {
+ return State.STATE_1;
+ }
+
+ @Override
+ protected boolean abort(Void env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+ stream.write(Bytes.toBytes(iResult));
+ }
+
+ @Override
+ protected void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+      byte[] data = new byte[4];
+      int n = stream.read(data); // guard against a short read of the 4-byte state
+      if (n != data.length) throw new IOException("short read: expected 4 bytes, got " + n);
+      iResult = Bytes.toInt(data);
+ }
+ }
+
+ @Test(timeout=30000)
+ public void testStateMachineRecovery() throws Exception {
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true);
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true);
+
+ // Step 1 - kill
+ Procedure proc = new TestStateMachineProcedure();
+ long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 1 exec && Step 2 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 2 exec && step 3 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 3 exec
+ restart();
+ waitProcedure(procId);
+ assertTrue(procExecutor.isRunning());
+
+ // The procedure is completed
+ ProcedureResult result = procExecutor.getResult(procId);
+ ProcedureTestingUtility.assertProcNotFailed(result);
+ assertEquals(15, Bytes.toInt(result.getResult()));
+ }
+
+ @Test(timeout=30000)
+ public void testStateMachineRollbackRecovery() throws Exception {
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true);
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true);
+
+ // Step 1 - kill
+ Procedure proc = new TestStateMachineProcedure();
+ long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 1 exec && Step 2 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 2 exec && step 3 - kill
+ restart();
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Step 3 exec - rollback step 3 - kill
+ procSleepInterval = 2500;
+ restart();
+ assertTrue(procExecutor.abort(procId));
+ waitProcedure(procId);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+ assertFalse(procExecutor.isRunning());
+
+ // Rollback step 3 - rollback step 2 - kill
+ restart();
+ waitProcedure(procId);
+ assertFalse(procExecutor.isRunning());
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+
+ // Rollback step 2 - step 1 - kill
+ restart();
+ waitProcedure(procId);
+ assertFalse(procExecutor.isRunning());
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId);
+
+ // Rollback step 1 - complete
+ restart();
+ waitProcedure(procId);
+ assertTrue(procExecutor.isRunning());
+
+ // The procedure is completed
+ ProcedureResult result = procExecutor.getResult(procId);
+ ProcedureTestingUtility.assertIsAbortException(result);
+ }
+
+ private void waitProcedure(final long procId) {
+ ProcedureTestingUtility.waitProcedure(procExecutor, procId);
+ dumpLogDirState();
+ }
+
+ private void dumpLogDirState() {
+ try {
+ FileStatus[] files = fs.listStatus(logDir);
+ if (files != null && files.length > 0) {
+ for (FileStatus file: files) {
+ assertTrue(file.toString(), file.isFile());
+ LOG.debug("log file " + file.getPath() + " size=" + file.getLen());
+ }
+ } else {
+ LOG.debug("no files under: " + logDir);
+ }
+ } catch (IOException e) {
+ LOG.warn("Unable to dump " + logDir, e);
+ }
+ }
+}
\ No newline at end of file
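Why the recovered procedure still reports 15: each completed state adds its increment (3 + 5 + 7), and the running total survives every kill because it rides along in serializeStateData/deserializeStateData. A minimal round-trip sketch of that 4-byte payload (hypothetical in-memory streams; Bytes as imported above):

    // inside a method that throws IOException
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write(Bytes.toBytes(3 + 5 + 7));   // serializeStateData: iResult = 15
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    byte[] data = new byte[4];
    int n = in.read(data);                 // deserializeStateData
    assert n == 4 : "state payload is exactly 4 bytes";
    int iResult = Bytes.toInt(data);       // 15, the value testStateMachineRecovery expects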
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java
new file mode 100644
index 0000000..88645ed
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, LargeTests.class})
+public class TestProcedureReplayOrder {
+ private static final Log LOG = LogFactory.getLog(TestProcedureReplayOrder.class);
+
+ private static final Procedure NULL_PROC = null;
+
+ private ProcedureExecutor<Void> procExecutor;
+ private TestProcedureEnv procEnv;
+ private ProcedureStore procStore;
+
+ private HBaseCommonTestingUtility htu;
+ private FileSystem fs;
+ private Path testDir;
+ private Path logDir;
+
+ @Before
+ public void setUp() throws IOException {
+ htu = new HBaseCommonTestingUtility();
+ htu.getConfiguration().setInt("hbase.procedure.store.wal.sync.wait.msec", 10);
+
+ testDir = htu.getDataTestDir();
+ fs = testDir.getFileSystem(htu.getConfiguration());
+ assertTrue(testDir.depth() > 1);
+
+ logDir = new Path(testDir, "proc-logs");
+ procEnv = new TestProcedureEnv();
+ procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
+    procExecutor = new ProcedureExecutor<TestProcedureEnv>(htu.getConfiguration(), procEnv, procStore);
+ procStore.start(24);
+ procExecutor.start(1);
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ procExecutor.stop();
+ procStore.stop(false);
+ fs.delete(logDir, true);
+ }
+
+ @Test(timeout=90000)
+  public void testSingleStepReplayOrder() throws Exception {
+    // prevent the procedures from becoming runnable
+ procEnv.setAcquireLock(false);
+
+ // submit the procedures
+ submitProcedures(16, 25, TestSingleStepProcedure.class);
+
+ // restart the executor and allow the procedures to run
+ ProcedureTestingUtility.restart(procExecutor, new Runnable() {
+ @Override
+ public void run() {
+ procEnv.setAcquireLock(true);
+ }
+ });
+
+    // wait for all the procedures to finish and
+    // assert that they executed in procId order
+ ProcedureTestingUtility.waitNoProcedureRunning(procExecutor);
+ procEnv.assertSortedExecList();
+
+ // TODO: FIXME: This should be revisited
+ }
+
+ @Ignore
+ @Test(timeout=90000)
+  public void testMultiStepReplayOrder() throws Exception {
+    // prevent the procedures from becoming runnable
+ procEnv.setAcquireLock(false);
+
+ // submit the procedures
+ submitProcedures(16, 10, TestTwoStepProcedure.class);
+
+ // restart the executor and allow the procedures to run
+ ProcedureTestingUtility.restart(procExecutor, new Runnable() {
+ @Override
+ public void run() {
+ procEnv.setAcquireLock(true);
+ }
+ });
+
+ fail("TODO: FIXME: NOT IMPLEMENT REPLAY ORDER");
+ }
+
+ private void submitProcedures(final int nthreads, final int nprocPerThread,
+ final Class<?> procClazz) throws Exception {
+ Thread[] submitThreads = new Thread[nthreads];
+ for (int i = 0; i < submitThreads.length; ++i) {
+ submitThreads[i] = new Thread() {
+ @Override
+ public void run() {
+ for (int i = 0; i < nprocPerThread; ++i) {
+ try {
+ procExecutor.submitProcedure((Procedure)procClazz.newInstance());
+ } catch (InstantiationException|IllegalAccessException e) {
+ LOG.error("unable to instantiate the procedure", e);
+ fail("failure during the proc.newInstance(): " + e.getMessage());
+ }
+ }
+ }
+ };
+ }
+
+ for (int i = 0; i < submitThreads.length; ++i) {
+ submitThreads[i].start();
+ }
+
+ for (int i = 0; i < submitThreads.length; ++i) {
+ submitThreads[i].join();
+ }
+ }
+
+ private static class TestProcedureEnv {
+ private ArrayList<Long> execList = new ArrayList<Long>();
+ private boolean acquireLock = true;
+
+ public void setAcquireLock(boolean acquireLock) {
+ this.acquireLock = acquireLock;
+ }
+
+ public boolean canAcquireLock() {
+ return acquireLock;
+ }
+
+ public void addToExecList(final Procedure proc) {
+ execList.add(proc.getProcId());
+ }
+
+ public ArrayList<Long> getExecList() {
+ return execList;
+ }
+
+ public void assertSortedExecList() {
+ LOG.debug("EXEC LIST: " + execList);
+ for (int i = 1; i < execList.size(); ++i) {
+ assertTrue("exec list not sorted: " + execList.get(i-1) + " >= " + execList.get(i),
+ execList.get(i-1) < execList.get(i));
+ }
+ }
+ }
+
+ public static class TestSingleStepProcedure extends SequentialProcedure<TestProcedureEnv> {
+ public TestSingleStepProcedure() { }
+
+ @Override
+ protected Procedure[] execute(TestProcedureEnv env) {
+ LOG.debug("execute procedure " + this);
+ env.addToExecList(this);
+ return null;
+ }
+
+ protected boolean acquireLock(final TestProcedureEnv env) {
+ return env.canAcquireLock();
+ }
+
+ @Override
+ protected void rollback(TestProcedureEnv env) { }
+
+ @Override
+ protected boolean abort(TestProcedureEnv env) { return true; }
+ }
+
+ public static class TestTwoStepProcedure extends SequentialProcedure<TestProcedureEnv> {
+ public TestTwoStepProcedure() { }
+
+ @Override
+ protected Procedure[] execute(TestProcedureEnv env) {
+ LOG.debug("execute procedure " + this);
+ env.addToExecList(this);
+ return new Procedure[] { new TestSingleStepProcedure() };
+ }
+
+ protected boolean acquireLock(final TestProcedureEnv env) {
+ return true;
+ }
+
+ @Override
+ protected void rollback(TestProcedureEnv env) { }
+
+ @Override
+ protected boolean abort(TestProcedureEnv env) { return true; }
+ }
+}
\ No newline at end of file
[10/50] [abbrv] hbase git commit: HBASE-13203 Procedure v2 - master
create/delete table
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
new file mode 100644
index 0000000..4e9b05e
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "MasterProcedureProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+import "RPC.proto";
+
+// ============================================================================
+// WARNING - Compatibility rules
+// ============================================================================
+// This .proto contains the data serialized by the master procedures.
+// Each procedure stores some state to know which steps were executed
+// and what parameters or data the previous steps created.
+// New code should be able to handle the old format, or at least fail cleanly
+// triggering a rollback/cleanup.
+//
+// Procedures that inherit from a StateMachineProcedure have an enum:
+//  - Do not change the numbers of the 'State' enum values;
+//    doing so will cause the wrong 'step' to be executed on pending
+//    procedures when they are replayed.
+//  - Do not remove items from the enum; new code must be able to handle
+//    all the previous 'steps', since there may be pending procedures waiting
+//    to be recovered and replayed. Alternatively, make sure that an unknown
+//    state results in a failure that rolls back the already executed steps.
+// ============================================================================
+
+enum CreateTableState {
+ CREATE_TABLE_PRE_OPERATION = 1;
+ CREATE_TABLE_WRITE_FS_LAYOUT = 2;
+ CREATE_TABLE_ADD_TO_META = 3;
+ CREATE_TABLE_ASSIGN_REGIONS = 4;
+ CREATE_TABLE_UPDATE_DESC_CACHE = 5;
+ CREATE_TABLE_POST_OPERATION = 6;
+}
+
+message CreateTableStateData {
+ required UserInformation user_info = 1;
+ required TableSchema table_schema = 2;
+ repeated RegionInfo region_info = 3;
+}
+
+enum DeleteTableState {
+ DELETE_TABLE_PRE_OPERATION = 1;
+ DELETE_TABLE_REMOVE_FROM_META = 2;
+ DELETE_TABLE_CLEAR_FS_LAYOUT = 3;
+ DELETE_TABLE_UPDATE_DESC_CACHE = 4;
+ DELETE_TABLE_UNASSIGN_REGIONS = 5;
+ DELETE_TABLE_POST_OPERATION = 6;
+}
+
+message DeleteTableStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ repeated RegionInfo region_info = 3;
+}
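Per the compatibility rules above, a procedure should fail cleanly on a state ordinal it does not recognize rather than execute the wrong step. A sketch of a defensive getState mapping on the Java side (a hypothetical override, following the getState/getStateId pattern seen in the state-machine tests earlier in this series; protobuf's generated valueOf(int) returns null for unknown values):

    @Override
    protected CreateTableState getState(final int stateId) {
      CreateTableState state = CreateTableState.valueOf(stateId);
      if (state == null) {
        // a newer version wrote a step we do not know; fail and roll back
        throw new IllegalStateException("unknown CreateTableState id=" + stateId);
      }
      return state;
    }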
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index edebb1a..107480a 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -316,6 +316,10 @@
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-procedure</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
</dependency>
<dependency>
@@ -336,6 +340,12 @@
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-procedure</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</dependency>
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
index 971fa50..0da16a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
@@ -21,6 +21,7 @@ import java.net.InetAddress;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo;
public interface RpcCallContext extends Delayable {
/**
@@ -57,4 +58,9 @@ public interface RpcCallContext extends Delayable {
* @return Address of remote client if a request is ongoing, else null
*/
InetAddress getRemoteAddress();
+
+ /**
+ * @return the client version info, or null if the information is not present
+ */
+ VersionInfo getClientVersionInfo();
}
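A hedged sketch of what the new accessor enables at a server-side call site (hypothetical handler code; null means the client predates version reporting in the connection header):

    VersionInfo versionInfo = callContext.getClientVersionInfo();
    if (versionInfo == null) {
      LOG.debug("client did not report a version");
    } else {
      LOG.debug("client version=" + versionInfo.getVersion()
          + " revision=" + versionInfo.getRevision());
    }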
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 770f4cd..c69a187 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.AuthMethod;
@@ -399,7 +400,7 @@ public class RpcServer implements RpcServerInterface {
// Set the exception as the result of the method invocation.
headerBuilder.setException(exceptionBuilder.build());
}
- // Pass reservoir to buildCellBlock. Keep reference to returne so can add it back to the
+    // Pass reservoir to buildCellBlock. Keep a reference to what is returned so we can add it back to the
// reservoir when finished. This is hacky and the hack is not contained but benefits are
// high when we can avoid a big buffer allocation on each rpc.
this.cellBlock = ipcUtil.buildCellBlock(this.connection.codec,
@@ -544,6 +545,11 @@ public class RpcServer implements RpcServerInterface {
public InetAddress getRemoteAddress() {
return remoteAddress;
}
+
+ @Override
+ public VersionInfo getClientVersionInfo() {
+ return connection.getVersionInfo();
+ }
}
/** Listens on the socket. Creates jobs for the handler threads*/
@@ -1273,6 +1279,13 @@ public class RpcServer implements RpcServerInterface {
this.lastContact = lastContact;
}
+ public VersionInfo getVersionInfo() {
+ if (connectionHeader.hasVersionInfo()) {
+ return connectionHeader.getVersionInfo();
+ }
+ return null;
+ }
+
/* Return true if the connection has no outstanding rpc */
private boolean isIdle() {
return rpcCount.get() == 0;
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 581e3c9..8ec883a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -90,8 +90,6 @@ import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
-import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
@@ -100,11 +98,18 @@ import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
@@ -123,6 +128,7 @@ import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -290,6 +296,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// it is assigned after 'initialized' guard set to true, so should be volatile
private volatile MasterQuotaManager quotaManager;
+ private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
+ private WALProcedureStore procedureStore;
+
// handle table states
private TableStateManager tableStateManager;
@@ -1002,6 +1011,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// Any time changing this maxThreads to > 1, pls see the comment at
// AccessController#postCreateTableHandler
this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
+ startProcedureExecutor();
// Start log cleaner thread
int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
@@ -1023,6 +1033,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
+ protected void sendShutdownInterrupt() {
+ super.sendShutdownInterrupt();
+ stopProcedureExecutor();
+ }
+
+ @Override
protected void stopServiceThreads() {
if (masterJettyServer != null) {
LOG.info("Stopping master jetty server");
@@ -1034,6 +1050,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
super.stopServiceThreads();
stopChores();
+
// Wait for all the remaining region servers to report in IFF we were
// running a cluster shutdown AND we were NOT aborting.
if (!isAborted() && this.serverManager != null &&
@@ -1054,6 +1071,34 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
}
+ private void startProcedureExecutor() throws IOException {
+ final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
+ final Path logDir = new Path(fileSystemManager.getRootDir(),
+ MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
+
+ procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
+ new MasterProcedureEnv.WALStoreLeaseRecovery(this));
+ procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
+ procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
+ procEnv.getProcedureQueue());
+
+ final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
+ Math.max(Runtime.getRuntime().availableProcessors(),
+ MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
+ procedureStore.start(numThreads);
+ procedureExecutor.start(numThreads);
+ }
+
+ private void stopProcedureExecutor() {
+ if (procedureExecutor != null) {
+ procedureExecutor.stop();
+ }
+
+ if (procedureStore != null) {
+ procedureStore.stop(isAborted());
+ }
+ }
+
private void stopChores() {
if (this.balancerChore != null) {
this.balancerChore.cancel(true);
@@ -1290,7 +1335,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
ensureNamespaceExists(namespace);
- HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
+ HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
checkInitialized();
sanityCheckTableDescriptor(hTableDescriptor);
this.quotaManager.checkNamespaceTableAndRegionQuota(hTableDescriptor.getTableName(),
@@ -1299,13 +1344,22 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preCreateTable(hTableDescriptor, newRegions);
}
LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
- this.service.submit(new CreateTableHandler(this,
- this.fileSystemManager, hTableDescriptor, conf,
- newRegions, this).prepare());
+
+ // TODO: We can handle/merge duplicate requests, and differentiate the
+ // TableExistsException case by checking whether the schema is the same or not.
+ ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
+ long procId = this.procedureExecutor.submitProcedure(
+ new CreateTableProcedure(procedureExecutor.getEnvironment(),
+ hTableDescriptor, newRegions, latch));
+ latch.await();
+
if (cpHost != null) {
cpHost.postCreateTable(hTableDescriptor, newRegions);
}
+ // TODO: change the interface to return the procId,
+ // and add it to the response protobuf.
+ //return procId;
}
/**
@@ -1512,29 +1566,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
}
- private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
- byte[][] splitKeys) {
- long regionId = System.currentTimeMillis();
- HRegionInfo[] hRegionInfos = null;
- if (splitKeys == null || splitKeys.length == 0) {
- hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
- false, regionId)};
- } else {
- int numRegions = splitKeys.length + 1;
- hRegionInfos = new HRegionInfo[numRegions];
- byte[] startKey = null;
- byte[] endKey = null;
- for (int i = 0; i < numRegions; i++) {
- endKey = (i == splitKeys.length) ? null : splitKeys[i];
- hRegionInfos[i] =
- new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
- false, regionId);
- startKey = endKey;
- }
- }
- return hRegionInfos;
- }
-
private static boolean isCatalogTable(final TableName tableName) {
return tableName.equals(TableName.META_TABLE_NAME);
}
@@ -1546,10 +1577,20 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preDeleteTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
- this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
+
+ // TODO: We can handle/merge duplicate requests
+ ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
+ long procId = this.procedureExecutor.submitProcedure(
+ new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch));
+ latch.await();
+
if (cpHost != null) {
cpHost.postDeleteTable(tableName);
}
+
+ // TODO: change the interface to return the procId,
+ // and add it to the response protobuf.
+ //return procId;
}
@Override
@@ -1851,6 +1892,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
+ public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return procedureExecutor;
+ }
+
+ @Override
public ServerName getServerName() {
return this.serverName;
}
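Both the createTable and deleteTable changes above share the same submit-and-wait shape: create a latch for pre-procedure clients, submit the procedure, and block on the latch so that prepare-time failures still surface synchronously to the caller. A condensed sketch of that pattern, assuming the procedureExecutor field introduced above and a descriptor/regions pair already in hand:

    // Sketch only: mirrors the createTable() change above; not new API.
    ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
    long procId = procedureExecutor.submitProcedure(
        new CreateTableProcedure(procedureExecutor.getEnvironment(),
            hTableDescriptor, newRegions, latch));
    // Blocks until the prepare step has run; per the compatibility comments,
    // this is where a synchronous TableExistsException reaches old clients.
    latch.await();

The procId is currently dropped (see the TODOs); per those comments, a follow-up is expected to return it through the RPC response.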
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 63f3119..7352fe8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@ -82,6 +84,11 @@ public interface MasterServices extends Server {
MasterQuotaManager getMasterQuotaManager();
/**
+ * @return Master's instance of {@link ProcedureExecutor}
+ */
+ ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor();
+
+ /**
* Check table is modifiable; i.e. exists and is offline.
* @param tableName Name of table to check.
* @throws TableNotDisabledException
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index f0f8fdd..02912b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
@@ -231,18 +231,15 @@ public class TableNamespaceManager {
}
private void createNamespaceTable(MasterServices masterServices) throws IOException {
- HRegionInfo newRegions[] = new HRegionInfo[]{
+ HRegionInfo[] newRegions = new HRegionInfo[]{
new HRegionInfo(HTableDescriptor.NAMESPACE_TABLEDESC.getTableName(), null, null)};
- //we need to create the table this way to bypass
- //checkInitialized
- masterServices.getExecutorService()
- .submit(new CreateTableHandler(masterServices,
- masterServices.getMasterFileSystem(),
- HTableDescriptor.NAMESPACE_TABLEDESC,
- masterServices.getConfiguration(),
- newRegions,
- masterServices).prepare());
+ // we need to create the table this way to bypass checkInitialized
+ masterServices.getMasterProcedureExecutor()
+ .submitProcedure(new CreateTableProcedure(
+ masterServices.getMasterProcedureExecutor().getEnvironment(),
+ HTableDescriptor.NAMESPACE_TABLEDESC,
+ newRegions));
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
new file mode 100644
index 0000000..dd6d387
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -0,0 +1,442 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.common.collect.Lists;
+
+@InterfaceAudience.Private
+public class CreateTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, CreateTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ // used for compatibility with old clients
+ private final ProcedurePrepareLatch syncLatch;
+
+ private HTableDescriptor hTableDescriptor;
+ private List<HRegionInfo> newRegions;
+ private UserGroupInformation user;
+
+ public CreateTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ syncLatch = null;
+ }
+
+ public CreateTableProcedure(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions)
+ throws IOException {
+ this(env, hTableDescriptor, newRegions, null);
+ }
+
+ public CreateTableProcedure(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
+ final ProcedurePrepareLatch syncLatch)
+ throws IOException {
+ this.hTableDescriptor = hTableDescriptor;
+ this.newRegions = newRegions != null ? Lists.newArrayList(newRegions) : null;
+ this.user = env.getRequestUser().getUGI();
+
+ // used for compatibility with clients without procedures
+ // they need a synchronous TableExistsException
+ this.syncLatch = syncLatch;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case CREATE_TABLE_PRE_OPERATION:
+ // Verify if we can create the table
+ boolean exists = !prepareCreate(env);
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+
+ if (exists) {
+ assert isFailed() : "the create should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+
+ preCreate(env);
+ setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT);
+ break;
+ case CREATE_TABLE_WRITE_FS_LAYOUT:
+ newRegions = createFsLayout(env, hTableDescriptor, newRegions);
+ setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
+ break;
+ case CREATE_TABLE_ADD_TO_META:
+ newRegions = addTableToMeta(env, hTableDescriptor, newRegions);
+ setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS);
+ break;
+ case CREATE_TABLE_ASSIGN_REGIONS:
+ assignRegions(env, getTableName(), newRegions);
+ setNextState(CreateTableState.CREATE_TABLE_UPDATE_DESC_CACHE);
+ break;
+ case CREATE_TABLE_UPDATE_DESC_CACHE:
+ updateTableDescCache(env, getTableName());
+ setNextState(CreateTableState.CREATE_TABLE_POST_OPERATION);
+ break;
+ case CREATE_TABLE_POST_OPERATION:
+ postCreate(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ LOG.error("Error trying to create table=" + getTableName() + " state=" + state, e);
+ setFailure("master-create-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state)
+ throws IOException {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case CREATE_TABLE_POST_OPERATION:
+ break;
+ case CREATE_TABLE_UPDATE_DESC_CACHE:
+ DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+ break;
+ case CREATE_TABLE_ASSIGN_REGIONS:
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ break;
+ case CREATE_TABLE_ADD_TO_META:
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), newRegions);
+ break;
+ case CREATE_TABLE_WRITE_FS_LAYOUT:
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, false);
+ break;
+ case CREATE_TABLE_PRE_OPERATION:
+ DeleteTableProcedure.deleteTableStates(env, getTableName());
+ // TODO-MAYBE: call the deleteTable coprocessor event?
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ break;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step=" + state + " table=" + getTableName(), e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected CreateTableState getState(final int stateId) {
+ return CreateTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final CreateTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected CreateTableState getInitialState() {
+ return CreateTableState.CREATE_TABLE_PRE_OPERATION;
+ }
+
+ @Override
+ protected void setNextState(final CreateTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("create-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public TableName getTableName() {
+ return hTableDescriptor.getTableName();
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.CREATE;
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.CreateTableStateData.Builder state =
+ MasterProcedureProtos.CreateTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setTableSchema(hTableDescriptor.convert());
+ if (newRegions != null) {
+ for (HRegionInfo hri: newRegions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.CreateTableStateData state =
+ MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
+ if (state.getRegionInfoCount() == 0) {
+ newRegions = null;
+ } else {
+ newRegions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ newRegions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "create table");
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
+ final TableName tableName = getTableName();
+ if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ setFailure("master-create-table", new TableExistsException(getTableName()));
+ return false;
+ }
+ return true;
+ }
+
+ private void preCreate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final HRegionInfo[] regions = newRegions == null ? null :
+ newRegions.toArray(new HRegionInfo[newRegions.size()]);
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.preCreateTableHandler(hTableDescriptor, regions);
+ return null;
+ }
+ });
+ }
+ }
+
+ private void postCreate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final HRegionInfo[] regions = (newRegions == null) ? null :
+ newRegions.toArray(new HRegionInfo[newRegions.size()]);
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.postCreateTableHandler(hTableDescriptor, regions);
+ return null;
+ }
+ });
+ }
+ }
+
+ protected interface CreateHdfsRegions {
+ List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
+ final Path tableRootDir, final TableName tableName,
+ final List<HRegionInfo> newRegions) throws IOException;
+ }
+
+ protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions)
+ throws IOException {
+ return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
+ @Override
+ public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
+ final Path tableRootDir, final TableName tableName,
+ final List<HRegionInfo> newRegions) throws IOException {
+ HRegionInfo[] regions = newRegions != null ?
+ newRegions.toArray(new HRegionInfo[newRegions.size()]) : null;
+ return ModifyRegionUtils.createRegions(env.getMasterConfiguration(),
+ tableRootDir, hTableDescriptor, regions, null);
+ }
+ });
+ }
+
+ protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
+ final CreateHdfsRegions hdfsRegionHandler) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ final Path tempdir = mfs.getTempDir();
+
+ // 1. Create Table Descriptor
+ // using a copy of the descriptor; the table will be created in ENABLING state first
+ TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor);
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
+ ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
+ .createTableDescriptorForTableDirectory(
+ tempTableDir, underConstruction, false);
+
+ // 2. Create Regions
+ newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
+ hTableDescriptor.getTableName(), newRegions);
+
+ // 3. Move Table temp directory to the hbase root location
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName());
+ FileSystem fs = mfs.getFileSystem();
+ if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
+ throw new IOException("Couldn't delete " + tableDir);
+ }
+ if (!fs.rename(tempTableDir, tableDir)) {
+ throw new IOException("Unable to move table from temp=" + tempTableDir +
+ " to hbase root=" + tableDir);
+ }
+ return newRegions;
+ }
+
+ protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor,
+ final List<HRegionInfo> regions) throws IOException {
+ if (regions != null && regions.size() > 0) {
+ ProcedureSyncWait.waitMetaRegions(env);
+
+ // Add regions to META
+ addRegionsToMeta(env, hTableDescriptor, regions);
+ // Add replicas if needed
+ List<HRegionInfo> newRegions = addReplicas(env, hTableDescriptor, regions);
+
+ // Setup replication for region replicas if needed
+ if (hTableDescriptor.getRegionReplication() > 1) {
+ ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
+ }
+ return newRegions;
+ }
+ return regions;
+ }
+
+ /**
+ * Create any replicas for the regions (the default replicas that were
+ * already created are passed to the method)
+ * @param hTableDescriptor descriptor to use
+ * @param regions default replicas
+ * @return the combined list of default and non-default replicas
+ */
+ private static List<HRegionInfo> addReplicas(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor,
+ final List<HRegionInfo> regions) {
+ int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1;
+ if (numRegionReplicas <= 0) {
+ return regions;
+ }
+ List<HRegionInfo> hRegionInfos =
+ new ArrayList<HRegionInfo>((numRegionReplicas+1)*regions.size());
+ for (int i = 0; i < regions.size(); i++) {
+ for (int j = 1; j <= numRegionReplicas; j++) {
+ hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), j));
+ }
+ }
+ hRegionInfos.addAll(regions);
+ return hRegionInfos;
+ }
+
+ protected static void assignRegions(final MasterProcedureEnv env,
+ final TableName tableName, final List<HRegionInfo> regions) throws IOException {
+ ProcedureSyncWait.waitRegionServers(env);
+
+ // Trigger immediate assignment of the regions in round-robin fashion
+ final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
+ ModifyRegionUtils.assignRegions(assignmentManager, regions);
+
+ // Enable table
+ assignmentManager.getTableStateManager()
+ .setTableState(tableName, TableState.State.ENABLED);
+ }
+
+ /**
+ * Add the specified set of regions to the hbase:meta table.
+ */
+ protected static void addRegionsToMeta(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor,
+ final List<HRegionInfo> regionInfos) throws IOException {
+ MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(),
+ regionInfos, hTableDescriptor.getRegionReplication());
+ }
+
+ protected static void updateTableDescCache(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ env.getMasterServices().getTableDescriptors().get(tableName);
+ }
+}
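For readers new to procedure-v2, the class above is easier to follow once the StateMachineProcedure contract is seen in isolation. Below is a minimal sketch of that contract; the names are hypothetical and it is not part of this patch, but the override set mirrors the ones CreateTableProcedure implements above:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;

    public class ExampleTableProcedure
        extends StateMachineProcedure<MasterProcedureEnv, ExampleTableProcedure.State> {
      public enum State { PREPARE, EXECUTE, FINISH }

      @Override
      protected Flow executeFromState(MasterProcedureEnv env, State state) {
        switch (state) {
          case PREPARE:   // validate; on failure call setFailure() and stop
            setNextState(State.EXECUTE);
            break;
          case EXECUTE:   // do the work; each step must be idempotent (it may be replayed)
            setNextState(State.FINISH);
            break;
          case FINISH:
            return Flow.NO_MORE_STATE;   // terminal state
        }
        return Flow.HAS_MORE_STATE;
      }

      @Override
      protected void rollbackState(MasterProcedureEnv env, State state) throws IOException {
        // undo exactly the step named by 'state'; the framework walks states backwards
      }

      @Override protected State getState(int id) { return State.values()[id]; }
      @Override protected int getStateId(State s) { return s.ordinal(); }
      @Override protected State getInitialState() { return State.PREPARE; }
      @Override public boolean abort(MasterProcedureEnv env) { return false; }

      // Persist/restore any extra fields so the procedure survives a master restart;
      // call super first so the engine's own state is kept, as the patch does above.
      @Override public void serializeStateData(OutputStream out) throws IOException {
        super.serializeStateData(out);
      }
      @Override public void deserializeStateData(InputStream in) throws IOException {
        super.deserializeStateData(in);
      }
    }

The key property is that the state id is checkpointed through the procedure store after each step, which is why CreateTableProcedure serializes its descriptor and region list rather than keeping them only in memory.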
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
new file mode 100644
index 0000000..ad5e671
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -0,0 +1,420 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.exceptions.HBaseException;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class DeleteTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, DeleteTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DeleteTableProcedure.class);
+
+ private List<HRegionInfo> regions;
+ private UserGroupInformation user;
+ private TableName tableName;
+
+ // used for compatibility with old clients
+ private final ProcedurePrepareLatch syncLatch;
+
+ public DeleteTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ syncLatch = null;
+ }
+
+ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ this(env, tableName, null);
+ }
+
+ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName,
+ final ProcedurePrepareLatch syncLatch) throws IOException {
+ this.tableName = tableName;
+ this.user = env.getRequestUser().getUGI();
+
+ // used for compatibility with clients without procedures
+ // they need a sync TableNotFoundException, TableNotDisabledException, ...
+ this.syncLatch = syncLatch;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case DELETE_TABLE_PRE_OPERATION:
+ // Verify if we can delete the table
+ boolean deletable = prepareDelete(env);
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ if (!deletable) {
+ assert isFailed() : "the delete should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+
+ preDelete(env);
+
+ // TODO: Move out... into acquireLock()
+ LOG.debug("waiting for '" + getTableName() + "' regions in transition");
+ regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
+ ProcedureSyncWait.waitRegionInTransition(env, regions);
+
+ setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
+ break;
+ case DELETE_TABLE_REMOVE_FROM_META:
+ LOG.debug("delete '" + getTableName() + "' regions from META");
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
+ setNextState(DeleteTableState.DELETE_TABLE_CLEAR_FS_LAYOUT);
+ break;
+ case DELETE_TABLE_CLEAR_FS_LAYOUT:
+ LOG.debug("delete '" + getTableName() + "' from filesystem");
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+ setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE);
+ break;
+ case DELETE_TABLE_UPDATE_DESC_CACHE:
+ LOG.debug("delete '" + getTableName() + "' descriptor");
+ DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+ setNextState(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS);
+ break;
+ case DELETE_TABLE_UNASSIGN_REGIONS:
+ LOG.debug("delete '" + getTableName() + "' assignment state");
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ setNextState(DeleteTableState.DELETE_TABLE_POST_OPERATION);
+ break;
+ case DELETE_TABLE_POST_OPERATION:
+ postDelete(env);
+ LOG.debug("delete '" + getTableName() + "' completed");
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (HBaseException|IOException e) {
+ LOG.warn("Retriable error trying to delete table=" + getTableName() + " state=" + state, e);
+ } catch (InterruptedException e) {
+ // if the interrupt is real, the executor will be stopped.
+ LOG.warn("Interrupted trying to delete table=" + getTableName() + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final DeleteTableState state) {
+ if (state == DeleteTableState.DELETE_TABLE_PRE_OPERATION) {
+ // nothing to rollback, pre-delete is just table-state checks.
+ // We can fail if the table does not exist or is not disabled.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ return;
+ }
+
+ // The delete doesn't have a rollback. The execution will succeed, at some point.
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+
+ @Override
+ protected DeleteTableState getState(final int stateId) {
+ return DeleteTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final DeleteTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected DeleteTableState getInitialState() {
+ return DeleteTableState.DELETE_TABLE_PRE_OPERATION;
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.DELETE;
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ // TODO: We may be able to abort if the procedure is not started yet.
+ return false;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "delete table");
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DeleteTableStateData.Builder state =
+ MasterProcedureProtos.DeleteTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName));
+ if (regions != null) {
+ for (HRegionInfo hri: regions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.DeleteTableStateData state =
+ MasterProcedureProtos.DeleteTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ tableName = ProtobufUtil.toTableName(state.getTableName());
+ if (state.getRegionInfoCount() == 0) {
+ regions = null;
+ } else {
+ regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ regions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ private boolean prepareDelete(final MasterProcedureEnv env) throws IOException {
+ try {
+ env.getMasterServices().checkTableModifiable(tableName);
+ } catch (TableNotFoundException|TableNotDisabledException e) {
+ setFailure("master-delete-table", e);
+ return false;
+ }
+ return true;
+ }
+
+ private boolean preDelete(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = this.tableName;
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.preDeleteTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ return true;
+ }
+
+ private void postDelete(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ deleteTableStates(env, tableName);
+
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = this.tableName;
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.postDeleteTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ }
+
+ protected static void deleteFromFs(final MasterProcedureEnv env,
+ final TableName tableName, final List<HRegionInfo> regions,
+ final boolean archive) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ final FileSystem fs = mfs.getFileSystem();
+ final Path tempdir = mfs.getTempDir();
+
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
+
+ if (fs.exists(tableDir)) {
+ // Ensure temp exists
+ if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ // Ensure parent exists
+ if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ // Move the table in /hbase/.tmp
+ if (!fs.rename(tableDir, tempTableDir)) {
+ if (fs.exists(tempTableDir)) {
+ // TODO
+ // what's in this dir? something old? probably something manual from the user...
+ // let's get rid of this stuff...
+ FileStatus[] files = fs.listStatus(tempdir);
+ if (files != null && files.length > 0) {
+ for (int i = 0; i < files.length; ++i) {
+ if (!files[i].isDir()) continue;
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
+ }
+ }
+ fs.delete(tempdir, true);
+ }
+ throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
+ }
+ }
+
+ // Archive regions from FS (temp directory)
+ if (archive) {
+ for (HRegionInfo hri : regions) {
+ LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
+ tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+ }
+ LOG.debug("Table '" + tableName + "' archived!");
+ }
+
+ // Delete table directory from FS (temp directory)
+ if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
+ throw new IOException("Couldn't delete " + tempTableDir);
+ }
+ }
+
+ /**
+ * There may be items for this table still up in hbase:meta in the case where the
+ * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta
+ * that have to do with this table. See HBASE-12980.
+ * @throws IOException
+ */
+ private static void cleanAnyRemainingRows(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ ClusterConnection connection = env.getMasterServices().getConnection();
+ Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName);
+ try (Table metaTable =
+ connection.getTable(TableName.META_TABLE_NAME)) {
+ List<Delete> deletes = new ArrayList<Delete>();
+ try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
+ for (Result result : resScanner) {
+ deletes.add(new Delete(result.getRow()));
+ }
+ }
+ if (!deletes.isEmpty()) {
+ LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName +
+ " from " + TableName.META_TABLE_NAME);
+ metaTable.delete(deletes);
+ }
+ }
+ }
+
+ protected static void deleteFromMeta(final MasterProcedureEnv env,
+ final TableName tableName, List<HRegionInfo> regions) throws IOException {
+ MetaTableAccessor.deleteRegions(env.getMasterServices().getConnection(), regions);
+
+ // Clean any remaining rows for this table.
+ cleanAnyRemainingRows(env, tableName);
+ }
+
+ protected static void deleteAssignmentState(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ AssignmentManager am = env.getMasterServices().getAssignmentManager();
+
+ // Clean up regions of the table in RegionStates.
+ LOG.debug("Removing '" + tableName + "' from region states.");
+ am.getRegionStates().tableDeleted(tableName);
+
+ // If an entry for this table exists in table states, remove it.
+ LOG.debug("Marking '" + tableName + "' as deleted.");
+ am.getTableStateManager().setDeletedTable(tableName);
+ }
+
+ protected static void deleteTableDescriptorCache(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ LOG.debug("Removing '" + tableName + "' descriptor.");
+ env.getMasterServices().getTableDescriptors().remove(tableName);
+ }
+
+ protected static void deleteTableStates(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ getMasterQuotaManager(env).removeTableFromNamespaceQuota(tableName);
+ }
+
+ private static MasterQuotaManager getMasterQuotaManager(final MasterProcedureEnv env)
+ throws IOException {
+ return ProcedureSyncWait.waitFor(env, "quota manager to be available",
+ new ProcedureSyncWait.Predicate<MasterQuotaManager>() {
+ @Override
+ public MasterQuotaManager evaluate() throws IOException {
+ return env.getMasterServices().getMasterQuotaManager();
+ }
+ });
+ }
+}
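Note the asymmetry with create: executeFromState above swallows HBaseException/IOException and simply returns HAS_MORE_STATE, so a failed step is re-driven with the same state, and rollbackState refuses to roll back anything past PRE_OPERATION. A toy, self-contained illustration of that retry contract (plain Java, no HBase dependencies; all names hypothetical):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    public class RetryLoopSketch {
      enum State { REMOVE_FROM_META, CLEAR_FS_LAYOUT }

      static int flakyCalls = 0;

      // A step must be idempotent: after a transient failure it runs again.
      static void step(State s) throws IOException {
        if (s == State.CLEAR_FS_LAYOUT && flakyCalls++ == 0) {
          throw new IOException("simulated transient FS error");
        }
        System.out.println("executed " + s);
      }

      public static void main(String[] args) {
        List<State> order = Arrays.asList(State.REMOVE_FROM_META, State.CLEAR_FS_LAYOUT);
        for (State s : order) {
          while (true) {              // the executor re-runs a failed step...
            try { step(s); break; }   // ...with the same state, until it succeeds
            catch (IOException e) {
              System.out.println("retrying " + s + ": " + e.getMessage());
            }
          }
        }
        System.out.println("delete completed; no rollback path needed");
      }
    }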
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java
new file mode 100644
index 0000000..90ed4ee
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public final class MasterProcedureConstants {
+ private MasterProcedureConstants() {}
+
+ public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
+
+ public static final String MASTER_PROCEDURE_THREADS = "hbase.master.procedure.threads";
+ public static final int DEFAULT_MIN_MASTER_PROCEDURE_THREADS = 4;
+}
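These knobs are consumed by the startProcedureExecutor() change in HMaster earlier in this commit. A small sketch of how the worker count resolves; the literal key and default mirror the constants above, and Configuration is plain hadoop-common:

    import org.apache.hadoop.conf.Configuration;

    public class ProcedureThreadsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Uncomment to pin the pool size explicitly:
        // conf.setInt("hbase.master.procedure.threads", 8);
        int numThreads = conf.getInt("hbase.master.procedure.threads",
            Math.max(Runtime.getRuntime().availableProcessors(), 4 /* DEFAULT_MIN */));
        System.out.println("procedure executor threads = " + numThreads);
      }
    }

So by default the executor gets one worker per core, but never fewer than four.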
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
new file mode 100644
index 0000000..0a33cd4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MasterProcedureEnv {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureEnv.class);
+
+ @InterfaceAudience.Private
+ public static class WALStoreLeaseRecovery implements WALProcedureStore.LeaseRecovery {
+ private final HMaster master;
+
+ public WALStoreLeaseRecovery(final HMaster master) {
+ this.master = master;
+ }
+
+ @Override
+ public void recoverFileLease(final FileSystem fs, final Path path) throws IOException {
+ final Configuration conf = master.getConfiguration();
+ final FSUtils fsUtils = FSUtils.getInstance(fs, conf);
+ fsUtils.recoverFileLease(fs, path, conf, new CancelableProgressable() {
+ @Override
+ public boolean progress() {
+ LOG.debug("Recover Procedure Store log lease: " + path);
+ return master.isActiveMaster();
+ }
+ });
+ }
+ }
+
+ @InterfaceAudience.Private
+ public static class MasterProcedureStoreListener
+ implements ProcedureStore.ProcedureStoreListener {
+ private final HMaster master;
+
+ public MasterProcedureStoreListener(final HMaster master) {
+ this.master = master;
+ }
+
+ @Override
+ public void abortProcess() {
+ master.abort("The Procedure Store lost the lease");
+ }
+ }
+
+ private final MasterProcedureQueue procQueue;
+ private final MasterServices master;
+
+ public MasterProcedureEnv(final MasterServices master) {
+ this.master = master;
+ this.procQueue = new MasterProcedureQueue(master.getConfiguration(),
+ master.getTableLockManager());
+ }
+
+ public User getRequestUser() throws IOException {
+ User user = RpcServer.getRequestUser();
+ if (user == null) {
+ user = UserProvider.instantiate(getMasterConfiguration()).getCurrent();
+ }
+ return user;
+ }
+
+ public MasterServices getMasterServices() {
+ return master;
+ }
+
+ public Configuration getMasterConfiguration() {
+ return master.getConfiguration();
+ }
+
+ public MasterCoprocessorHost getMasterCoprocessorHost() {
+ return master.getMasterCoprocessorHost();
+ }
+
+ public MasterProcedureQueue getProcedureQueue() {
+ return procQueue;
+ }
+
+ public boolean isRunning() {
+ return master.getMasterProcedureExecutor().isRunning();
+ }
+
+ public boolean isInitialized() {
+ return master.isInitialized();
+ }
+}
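The two inner classes above are the glue the HMaster change plugs into WALProcedureStore. A hedged sketch of wiring store and executor together outside the master (e.g. in a test harness), using only constructors and calls that appear in this commit; the no-op lease recovery and the in-scope conf/fs/logDir/masterServices/numThreads are assumptions:

    MasterProcedureEnv env = new MasterProcedureEnv(masterServices);
    WALProcedureStore store = new WALProcedureStore(conf, fs, logDir,
        new WALProcedureStore.LeaseRecovery() {
          @Override
          public void recoverFileLease(FileSystem walFs, Path path) throws IOException {
            // no-op: acceptable for a single-process test, never for a real master
          }
        });
    ProcedureExecutor<MasterProcedureEnv> executor =
        new ProcedureExecutor<MasterProcedureEnv>(conf, env, store, env.getProcedureQueue());
    store.start(numThreads);
    executor.start(numThreads);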
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java
new file mode 100644
index 0000000..0dd0c3d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureFairRunQueues;
+import org.apache.hadoop.hbase.procedure2.ProcedureRunnableSet;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
+
+/**
+ * ProcedureRunnableSet for the Master Procedures.
+ * This RunnableSet tries to provide to the ProcedureExecutor procedures
+ * that can be executed without having to wait on a lock.
+ * Most of the master operations can be executed concurrently, if they
+ * are operating on different tables (e.g. two create table operations can be
+ * performed at the same time, one on table A and one on table B).
+ *
+ * Each procedure should implement an interface providing information for this queue.
+ * For example, table-related procedures should implement TableProcedureInterface.
+ * Each procedure will be pushed into its own queue, and based on the operation type
+ * we may take smarter decisions, e.g. we can abort all the operations preceding
+ * a delete table, or similar.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MasterProcedureQueue implements ProcedureRunnableSet {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureQueue.class);
+
+ private final ProcedureFairRunQueues<TableName, RunQueue> fairq;
+ private final ReentrantLock lock = new ReentrantLock();
+ private final Condition waitCond = lock.newCondition();
+ private final TableLockManager lockManager;
+
+ private final int metaTablePriority;
+ private final int userTablePriority;
+ private final int sysTablePriority;
+
+ private int queueSize;
+
+ public MasterProcedureQueue(final Configuration conf, final TableLockManager lockManager) {
+ this.fairq = new ProcedureFairRunQueues<TableName, RunQueue>(1);
+ this.lockManager = lockManager;
+
+ // TODO: should this be part of the HTD?
+ metaTablePriority = conf.getInt("hbase.master.procedure.queue.meta.table.priority", 3);
+ sysTablePriority = conf.getInt("hbase.master.procedure.queue.system.table.priority", 2);
+ userTablePriority = conf.getInt("hbase.master.procedure.queue.user.table.priority", 1);
+ }
+
+ @Override
+ public void addFront(final Procedure proc) {
+ lock.lock();
+ try {
+ getRunQueueOrCreate(proc).addFront(proc);
+ queueSize++;
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void addBack(final Procedure proc) {
+ lock.lock();
+ try {
+ getRunQueueOrCreate(proc).addBack(proc);
+ queueSize++;
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void yield(final Procedure proc) {
+ addFront(proc);
+ }
+
+ @Override
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
+ public Long poll() {
+ lock.lock();
+ try {
+ if (queueSize == 0) {
+ waitCond.await();
+ if (queueSize == 0) {
+ return null;
+ }
+ }
+
+ RunQueue queue = fairq.poll();
+ if (queue != null && queue.isAvailable()) {
+ queueSize--;
+ return queue.poll();
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return null;
+ } finally {
+ lock.unlock();
+ }
+ return null;
+ }
+
+ @Override
+ public void signalAll() {
+ lock.lock();
+ try {
+ waitCond.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void clear() {
+ lock.lock();
+ try {
+ fairq.clear();
+ queueSize = 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public int size() {
+ lock.lock();
+ try {
+ return queueSize;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public String toString() {
+ lock.lock();
+ try {
+ return "MasterProcedureQueue size=" + queueSize + ": " + fairq;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void completionCleanup(Procedure proc) {
+ if (proc instanceof TableProcedureInterface) {
+ TableProcedureInterface iProcTable = (TableProcedureInterface)proc;
+ boolean tableDeleted;
+ if (proc.hasException()) {
+ IOException procEx = proc.getException().unwrapRemoteException();
+ if (iProcTable.getTableOperationType() == TableOperationType.CREATE) {
+ // create failed because the table already exists
+ tableDeleted = !(procEx instanceof TableExistsException);
+ } else {
+ // the operation failed because the table does not exist
+ tableDeleted = (procEx instanceof TableNotFoundException);
+ }
+ } else {
+ // the table was deleted
+ tableDeleted = (iProcTable.getTableOperationType() == TableOperationType.DELETE);
+ }
+ if (tableDeleted) {
+ markTableAsDeleted(iProcTable.getTableName());
+ }
+ }
+ }
+
+ private RunQueue getRunQueueOrCreate(final Procedure proc) {
+ if (proc instanceof TableProcedureInterface) {
+ final TableName table = ((TableProcedureInterface)proc).getTableName();
+ return getRunQueueOrCreate(table);
+ }
+ // TODO: at the moment we only have Table procedures
+ // if you are implementing a non-table procedure, you have two options: create
+ // a group for all the non-table procedures, or try to find a key for your
+ // non-table procedure and implement something similar to the TableRunQueue.
+ throw new UnsupportedOperationException("RQs for non-table procedures are not implemented yet");
+ }
+
+ private TableRunQueue getRunQueueOrCreate(final TableName table) {
+ final TableRunQueue queue = getRunQueue(table);
+ if (queue != null) return queue;
+ return (TableRunQueue)fairq.add(table, createTableRunQueue(table));
+ }
+
+ private TableRunQueue createTableRunQueue(final TableName table) {
+ int priority = userTablePriority;
+ if (table.equals(TableName.META_TABLE_NAME)) {
+ priority = metaTablePriority;
+ } else if (table.isSystemTable()) {
+ priority = sysTablePriority;
+ }
+ return new TableRunQueue(priority);
+ }
+
+ private TableRunQueue getRunQueue(final TableName table) {
+ return (TableRunQueue)fairq.get(table);
+ }
+
+ /**
+ * Try to acquire the read lock on the specified table.
+ * Other read operations in the table-queue may be executed concurrently,
+ * while write operations have to wait until all the read-locks are released.
+ * @param table Table to lock
+ * @param purpose Human readable reason for locking the table
+ * @return true if we were able to acquire the lock on the table, otherwise false.
+ */
+ public boolean tryAcquireTableRead(final TableName table, final String purpose) {
+ return getRunQueueOrCreate(table).tryRead(lockManager, table, purpose);
+ }
+
+ /**
+ * Release the read lock taken with tryAcquireTableRead()
+ * @param table the name of the table that has the read lock
+ */
+ public void releaseTableRead(final TableName table) {
+ getRunQueue(table).releaseRead(lockManager, table);
+ }
+
+ /**
+ * Try to acquire the write lock on the specified table.
+ * Other operations in the table-queue will be executed after the lock is released.
+ * @param table Table to lock
+ * @param purpose Human readable reason for locking the table
+ * @return true if we were able to acquire the lock on the table, otherwise false.
+ */
+ public boolean tryAcquireTableWrite(final TableName table, final String purpose) {
+ return getRunQueueOrCreate(table).tryWrite(lockManager, table, purpose);
+ }
+
+ /**
+ * Release the write lock taken with tryAcquireTableWrite()
+ * @param table the name of the table that has the write lock
+ */
+ public void releaseTableWrite(final TableName table) {
+ getRunQueue(table).releaseWrite(lockManager, table);
+ }
+
+ /**
+ * Tries to remove the queue and the table-lock of the specified table.
+ * If there are new operations pending (e.g. a new create),
+ * the remove will not be performed.
+ * @param table the name of the table that should be marked as deleted
+ * @return true if deletion succeeded, false otherwise meaning that there are
+ * other new operations pending for that table (e.g. a new create).
+ */
+ protected boolean markTableAsDeleted(final TableName table) {
+ TableRunQueue queue = getRunQueue(table);
+ if (queue != null) {
+ lock.lock();
+ try {
+ if (queue.isEmpty() && !queue.isLocked()) {
+ fairq.remove(table);
+
+ // Remove the table lock
+ try {
+ lockManager.tableDeleted(table);
+ } catch (IOException e) {
+ LOG.warn("Received exception from TableLockManager.tableDeleted:", e); //not critical
+ }
+ } else {
+ // TODO: If there are no create, we can drop all the other ops
+ return false;
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+ return true;
+ }
+
+ private interface RunQueue extends ProcedureFairRunQueues.FairObject {
+ void addFront(Procedure proc);
+ void addBack(Procedure proc);
+ Long poll();
+ boolean isLocked();
+ }
+
+ /**
+ * Run Queue for a Table. It contains a read-write lock that is used by the
+ * MasterProcedureQueue to decide if we should fetch an item from this queue
+ * or skip to another one which will be able to run without waiting for locks.
+ */
+ private static class TableRunQueue implements RunQueue {
+ private final Deque<Long> runnables = new ArrayDeque<Long>();
+ private final int priority;
+
+ private TableLock tableLock = null;
+ private boolean wlock = false;
+ private int rlock = 0;
+
+ public TableRunQueue(int priority) {
+ this.priority = priority;
+ }
+
+ @Override
+ public void addFront(final Procedure proc) {
+ runnables.addFirst(proc.getProcId());
+ }
+
+ // TODO: Improve run-queue push with TableProcedureInterface.getType()
+ // we can take smart decisions based on the type of the operation (e.g. create/delete)
+ @Override
+ public void addBack(final Procedure proc) {
+ runnables.addLast(proc.getProcId());
+ }
+
+ @Override
+ public Long poll() {
+ return runnables.poll();
+ }
+
+ @Override
+ public boolean isAvailable() {
+ synchronized (this) {
+ return !wlock && !runnables.isEmpty();
+ }
+ }
+
+ public boolean isEmpty() {
+ return runnables.isEmpty();
+ }
+
+ @Override
+ public boolean isLocked() {
+ synchronized (this) {
+ return wlock || rlock > 0;
+ }
+ }
+
+ public boolean tryRead(final TableLockManager lockManager,
+ final TableName tableName, final String purpose) {
+ synchronized (this) {
+ if (wlock) {
+ return false;
+ }
+
+ // Take zk-read-lock
+ tableLock = lockManager.readLock(tableName, purpose);
+ try {
+ tableLock.acquire();
+ } catch (IOException e) {
+ LOG.error("failed acquire read lock on " + tableName, e);
+ tableLock = null;
+ return false;
+ }
+
+ rlock++;
+ }
+ return true;
+ }
+
+ public void releaseRead(final TableLockManager lockManager,
+ final TableName tableName) {
+ synchronized (this) {
+ releaseTableLock(lockManager, rlock == 1);
+ rlock--;
+ }
+ }
+
+ public boolean tryWrite(final TableLockManager lockManager,
+ final TableName tableName, final String purpose) {
+ synchronized (this) {
+ if (wlock || rlock > 0) {
+ return false;
+ }
+
+ // Take zk-write-lock
+ tableLock = lockManager.writeLock(tableName, purpose);
+ try {
+ tableLock.acquire();
+ } catch (IOException e) {
+ LOG.error("failed acquire write lock on " + tableName, e);
+ tableLock = null;
+ return false;
+ }
+ wlock = true;
+ }
+ return true;
+ }
+
+ public void releaseWrite(final TableLockManager lockManager,
+ final TableName tableName) {
+ synchronized (this) {
+ releaseTableLock(lockManager, true);
+ wlock = false;
+ }
+ }
+
+ private void releaseTableLock(final TableLockManager lockManager, boolean reset) {
+ for (int i = 0; i < 3; ++i) {
+ try {
+ tableLock.release();
+ if (reset) {
+ tableLock = null;
+ }
+ break;
+ } catch (IOException e) {
+ LOG.warn("Could not release the table write-lock", e);
+ }
+ }
+ }
+
+ @Override
+ public int getPriority() {
+ return priority;
+ }
+
+ @Override
+ public String toString() {
+ return runnables.toString();
+ }
+ }
+}
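A minimal usage sketch for the table-lock API above, assuming a MasterProcedureQueue instance named queue; the table name and the work inside the try block are hypothetical placeholders, not part of the patch.

TableName table = TableName.valueOf("exampleTable"); // hypothetical table
if (queue.tryAcquireTableWrite(table, "add column family")) {
  try {
    // exclusive table work runs here while the zk write lock is held
  } finally {
    queue.releaseTableWrite(table); // always release, even on failure
  }
} else {
  // lock unavailable: another operation holds it; retry the procedure later
}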
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
new file mode 100644
index 0000000..d7c0b92
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class MasterProcedureUtil {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureUtil.class);
+
+ private MasterProcedureUtil() {}
+
+ public static UserInformation toProtoUserInfo(UserGroupInformation ugi) {
+ UserInformation.Builder userInfoPB = UserInformation.newBuilder();
+ userInfoPB.setEffectiveUser(ugi.getUserName());
+ if (ugi.getRealUser() != null) {
+ userInfoPB.setRealUser(ugi.getRealUser().getUserName());
+ }
+ return userInfoPB.build();
+ }
+
+ public static UserGroupInformation toUserInfo(UserInformation userInfoProto) {
+ if (userInfoProto.hasEffectiveUser()) {
+ String effectiveUser = userInfoProto.getEffectiveUser();
+ if (userInfoProto.hasRealUser()) {
+ String realUser = userInfoProto.getRealUser();
+ UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser);
+ return UserGroupInformation.createProxyUser(effectiveUser, realUserUgi);
+ }
+ return UserGroupInformation.createRemoteUser(effectiveUser);
+ }
+ return null;
+ }
+}
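A short round-trip sketch for the two helpers above, assuming the standard Hadoop UserGroupInformation factory methods; the user name is a hypothetical example.

import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
import org.apache.hadoop.security.UserGroupInformation;

// Capture the submitting user in the persisted procedure state, then
// restore it later (e.g. when replaying the procedure after a restart).
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
UserInformation proto = MasterProcedureUtil.toProtoUserInfo(ugi);
UserGroupInformation restored = MasterProcedureUtil.toUserInfo(proto);
// restored.getUserName() equals "alice"; if the original had a real user,
// toUserInfo() rebuilds it as a proxy user instead.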
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
new file mode 100644
index 0000000..2a1abca
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RpcCallContext;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo;
+
+/**
+ * Latch used by the Master to preserve the synchronous prepare() behaviour
+ * for old clients, which can only receive exceptions synchronously.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class ProcedurePrepareLatch {
+ private static final NoopLatch noopLatch = new NoopLatch();
+
+ public static ProcedurePrepareLatch createLatch() {
+ // don't use the latch if we have procedure support
+ return hasProcedureSupport() ? noopLatch : new CompatibilityLatch();
+ }
+
+ public static boolean hasProcedureSupport() {
+ return currentClientHasMinimumVersion(1, 1);
+ }
+
+ private static boolean currentClientHasMinimumVersion(int major, int minor) {
+ RpcCallContext call = RpcServer.getCurrentCall();
+ VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null;
+ if (versionInfo != null) {
+ String[] components = versionInfo.getVersion().split("\\.");
+
+ int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0;
+ if (clientMajor != major) {
+ return clientMajor > major;
+ }
+
+ int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0;
+ return clientMinor >= minor;
+ }
+ return false;
+ }
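To make the comparison above concrete, here is how a few hypothetical client version strings would be classified by currentClientHasMinimumVersion(1, 1):

// "2.0.0"  -> true  (clientMajor 2 > 1, minor not consulted)
// "1.1.3"  -> true  (majors equal, clientMinor 1 >= 1)
// "1.0.2"  -> false (majors equal, clientMinor 0 < 1)
// "0.98.6" -> false (clientMajor 0 < 1)
// no RPC call context or no version info -> false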
+
+ protected abstract void countDown(final Procedure proc);
+ public abstract void await() throws IOException;
+
+ protected static void releaseLatch(final ProcedurePrepareLatch latch, final Procedure proc) {
+ if (latch != null) {
+ latch.countDown(proc);
+ }
+ }
+
+ private static class NoopLatch extends ProcedurePrepareLatch {
+ protected void countDown(final Procedure proc) {}
+ public void await() throws IOException {}
+ }
+
+ protected static class CompatibilityLatch extends ProcedurePrepareLatch {
+ private final CountDownLatch latch = new CountDownLatch(1);
+
+ private IOException exception = null;
+
+ protected void countDown(final Procedure proc) {
+ if (proc.hasException()) {
+ exception = proc.getException().unwrapRemoteException();
+ }
+ latch.countDown();
+ }
+
+ public void await() throws IOException {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+
+ if (exception != null) {
+ throw exception;
+ }
+ }
+ }
+}
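A minimal call-pattern sketch for the latch; the procedure construction and executor submission are hypothetical stand-ins, since the diff above only defines the latch itself.

// Old-client compatibility: block the RPC handler until prepare() has run,
// so prepare-time failures still surface as synchronous exceptions.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
// hypothetical: hand the latch to a procedure and submit it
// procExec.submitProcedure(new DeleteTableProcedure(env, tableName, latch));
latch.await(); // no-op for 1.1+ clients; rethrows the prepare() exception otherwise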
[41/50] [abbrv] hbase git commit: HBASE-13453. Master should not bind
to region server ports (Srikanth Srungarapu)
Posted by jm...@apache.org.
HBASE-13453. Master should not bind to region server ports (Srikanth Srungarapu)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14261bc9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14261bc9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14261bc9
Branch: refs/heads/hbase-11339
Commit: 14261bc9e515bbaf2fa40f5779cb83a3372f0e57
Parents: 2da1bf1
Author: Devaraj Das <dd...@apache.org>
Authored: Wed Apr 15 11:02:08 2015 -0700
Committer: Devaraj Das <dd...@apache.org>
Committed: Wed Apr 15 11:02:08 2015 -0700
----------------------------------------------------------------------
hbase-common/src/main/resources/hbase-default.xml | 5 +++++
.../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 3 +++
.../org/apache/hadoop/hbase/regionserver/HRegionServer.java | 4 ++++
.../org/apache/hadoop/hbase/regionserver/RSRpcServices.java | 5 +++++
4 files changed, 17 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/14261bc9/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index ff4136b..bf14a53 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -96,6 +96,11 @@ possible configurations would overwhelm and obscure the important.
</property>
<!--Master configurations-->
+ <property >
+ <name>hbase.master.port</name>
+ <value>16000</value>
+ <description>The port the HBase Master should bind to.</description>
+ </property>
<property>
<name>hbase.master.info.port</name>
<value>16010</value>
http://git-wip-us.apache.org/repos/asf/hbase/blob/14261bc9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 45bcdcb..bcc43f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -417,6 +417,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
RedirectServlet.regionServerInfoPort = infoServer.getPort();
+ if(RedirectServlet.regionServerInfoPort == infoPort) {
+ return infoPort;
+ }
masterJettyServer = new org.mortbay.jetty.Server();
Connector connector = new SelectChannelConnector();
connector.setHost(addr);
http://git-wip-us.apache.org/repos/asf/hbase/blob/14261bc9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f15eb1b..0cc1e51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1734,6 +1734,10 @@ public class HRegionServer extends HasThread implements
private int putUpWebUI() throws IOException {
int port = this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT,
HConstants.DEFAULT_REGIONSERVER_INFOPORT);
+ if(this instanceof HMaster) {
+ port = conf.getInt(HConstants.MASTER_INFO_PORT,
+ HConstants.DEFAULT_MASTER_INFOPORT);
+ }
// -1 is for disabling info server
if (port < 0) return port;
String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0");
http://git-wip-us.apache.org/repos/asf/hbase/blob/14261bc9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index f9b8d61..15bf2cb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.master.MasterRpcServices;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
@@ -800,6 +801,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
String hostname = getHostname(rs.conf);
int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT,
HConstants.DEFAULT_REGIONSERVER_PORT);
+ if(this instanceof MasterRpcServices) {
+ port = rs.conf.getInt(HConstants.MASTER_PORT,
+ HConstants.DEFAULT_MASTER_PORT);
+ }
// Creation of a HSA will force a resolve.
InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
InetSocketAddress bindAddress = new InetSocketAddress(
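Taken together, the hunks above make the shared bootstrap role-aware; a condensed sketch of the selection logic, where isMaster is a hypothetical flag standing in for the instanceof checks:

// A master reuses the region server RPC/bootstrap code but swaps in its
// own port constants, so the two roles no longer contend for one port.
int port = conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT);
if (isMaster) { // hypothetical flag for "this instanceof MasterRpcServices"
  port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
}
InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);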
[26/50] [abbrv] hbase git commit: HBASE-13209 Procedure V2 - master
Add/Modify/Delete Column Family (Stephen Yuan Jiang)
Posted by jm...@apache.org.
HBASE-13209 Procedure V2 - master Add/Modify/Delete Column Family (Stephen Yuan Jiang)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ae8b8cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ae8b8cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ae8b8cc
Branch: refs/heads/hbase-11339
Commit: 4ae8b8cc52c0bb08ad31c32e5c76c0ad6f268f6f
Parents: 7f53833
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 21:21:18 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:43 2015 +0100
----------------------------------------------------------------------
.../generated/MasterProcedureProtos.java | 4027 +++++++++++++++++-
.../src/main/protobuf/MasterProcedure.proto | 46 +
.../org/apache/hadoop/hbase/master/HMaster.java | 30 +-
.../handler/TableDeleteFamilyHandler.java | 6 +-
.../procedure/AddColumnFamilyProcedure.java | 409 ++
.../procedure/DeleteColumnFamilyProcedure.java | 441 ++
.../procedure/ModifyColumnFamilyProcedure.java | 384 ++
.../hbase/master/TestTableLockManager.java | 31 -
.../handler/TestTableDeleteFamilyHandler.java | 122 +-
.../TestTableDescriptorModification.java | 124 +-
.../MasterProcedureTestingUtility.java | 34 +
.../procedure/TestAddColumnFamilyProcedure.java | 246 ++
.../TestDeleteColumnFamilyProcedure.java | 302 ++
.../TestModifyColumnFamilyProcedure.java | 238 ++
14 files changed, 6323 insertions(+), 117 deletions(-)
----------------------------------------------------------------------
[25/50] [abbrv] hbase git commit: HBASE-13209 Procedure V2 - master
Add/Modify/Delete Column Family (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 98260c1..4713a0a 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -371,6 +371,342 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(enum_scope:DeleteTableState)
}
+ /**
+ * Protobuf enum {@code AddColumnFamilyState}
+ */
+ public enum AddColumnFamilyState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>ADD_COLUMN_FAMILY_PREPARE = 1;</code>
+ */
+ ADD_COLUMN_FAMILY_PREPARE(0, 1),
+ /**
+ * <code>ADD_COLUMN_FAMILY_PRE_OPERATION = 2;</code>
+ */
+ ADD_COLUMN_FAMILY_PRE_OPERATION(1, 2),
+ /**
+ * <code>ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3),
+ /**
+ * <code>ADD_COLUMN_FAMILY_POST_OPERATION = 4;</code>
+ */
+ ADD_COLUMN_FAMILY_POST_OPERATION(3, 4),
+ /**
+ * <code>ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5;</code>
+ */
+ ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS(4, 5),
+ ;
+
+ /**
+ * <code>ADD_COLUMN_FAMILY_PREPARE = 1;</code>
+ */
+ public static final int ADD_COLUMN_FAMILY_PREPARE_VALUE = 1;
+ /**
+ * <code>ADD_COLUMN_FAMILY_PRE_OPERATION = 2;</code>
+ */
+ public static final int ADD_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2;
+ /**
+ * <code>ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ public static final int ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3;
+ /**
+ * <code>ADD_COLUMN_FAMILY_POST_OPERATION = 4;</code>
+ */
+ public static final int ADD_COLUMN_FAMILY_POST_OPERATION_VALUE = 4;
+ /**
+ * <code>ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5;</code>
+ */
+ public static final int ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 5;
+
+
+ public final int getNumber() { return value; }
+
+ public static AddColumnFamilyState valueOf(int value) {
+ switch (value) {
+ case 1: return ADD_COLUMN_FAMILY_PREPARE;
+ case 2: return ADD_COLUMN_FAMILY_PRE_OPERATION;
+ case 3: return ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR;
+ case 4: return ADD_COLUMN_FAMILY_POST_OPERATION;
+ case 5: return ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<AddColumnFamilyState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<AddColumnFamilyState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<AddColumnFamilyState>() {
+ public AddColumnFamilyState findValueByNumber(int number) {
+ return AddColumnFamilyState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3);
+ }
+
+ private static final AddColumnFamilyState[] VALUES = values();
+
+ public static AddColumnFamilyState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private AddColumnFamilyState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:AddColumnFamilyState)
+ }
+
+ /**
+ * Protobuf enum {@code ModifyColumnFamilyState}
+ */
+ public enum ModifyColumnFamilyState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_PREPARE = 1;</code>
+ */
+ MODIFY_COLUMN_FAMILY_PREPARE(0, 1),
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2;</code>
+ */
+ MODIFY_COLUMN_FAMILY_PRE_OPERATION(1, 2),
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3),
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_POST_OPERATION = 4;</code>
+ */
+ MODIFY_COLUMN_FAMILY_POST_OPERATION(3, 4),
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5;</code>
+ */
+ MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS(4, 5),
+ ;
+
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_PREPARE = 1;</code>
+ */
+ public static final int MODIFY_COLUMN_FAMILY_PREPARE_VALUE = 1;
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2;</code>
+ */
+ public static final int MODIFY_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2;
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ public static final int MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3;
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_POST_OPERATION = 4;</code>
+ */
+ public static final int MODIFY_COLUMN_FAMILY_POST_OPERATION_VALUE = 4;
+ /**
+ * <code>MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5;</code>
+ */
+ public static final int MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 5;
+
+
+ public final int getNumber() { return value; }
+
+ public static ModifyColumnFamilyState valueOf(int value) {
+ switch (value) {
+ case 1: return MODIFY_COLUMN_FAMILY_PREPARE;
+ case 2: return MODIFY_COLUMN_FAMILY_PRE_OPERATION;
+ case 3: return MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR;
+ case 4: return MODIFY_COLUMN_FAMILY_POST_OPERATION;
+ case 5: return MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ModifyColumnFamilyState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ModifyColumnFamilyState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ModifyColumnFamilyState>() {
+ public ModifyColumnFamilyState findValueByNumber(int number) {
+ return ModifyColumnFamilyState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+ }
+
+ private static final ModifyColumnFamilyState[] VALUES = values();
+
+ public static ModifyColumnFamilyState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private ModifyColumnFamilyState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:ModifyColumnFamilyState)
+ }
+
+ /**
+ * Protobuf enum {@code DeleteColumnFamilyState}
+ */
+ public enum DeleteColumnFamilyState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>DELETE_COLUMN_FAMILY_PREPARE = 1;</code>
+ */
+ DELETE_COLUMN_FAMILY_PREPARE(0, 1),
+ /**
+ * <code>DELETE_COLUMN_FAMILY_PRE_OPERATION = 2;</code>
+ */
+ DELETE_COLUMN_FAMILY_PRE_OPERATION(1, 2),
+ /**
+ * <code>DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3),
+ /**
+ * <code>DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4;</code>
+ */
+ DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT(3, 4),
+ /**
+ * <code>DELETE_COLUMN_FAMILY_POST_OPERATION = 5;</code>
+ */
+ DELETE_COLUMN_FAMILY_POST_OPERATION(4, 5),
+ /**
+ * <code>DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6;</code>
+ */
+ DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS(5, 6),
+ ;
+
+ /**
+ * <code>DELETE_COLUMN_FAMILY_PREPARE = 1;</code>
+ */
+ public static final int DELETE_COLUMN_FAMILY_PREPARE_VALUE = 1;
+ /**
+ * <code>DELETE_COLUMN_FAMILY_PRE_OPERATION = 2;</code>
+ */
+ public static final int DELETE_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2;
+ /**
+ * <code>DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3;</code>
+ */
+ public static final int DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3;
+ /**
+ * <code>DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4;</code>
+ */
+ public static final int DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT_VALUE = 4;
+ /**
+ * <code>DELETE_COLUMN_FAMILY_POST_OPERATION = 5;</code>
+ */
+ public static final int DELETE_COLUMN_FAMILY_POST_OPERATION_VALUE = 5;
+ /**
+ * <code>DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6;</code>
+ */
+ public static final int DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static DeleteColumnFamilyState valueOf(int value) {
+ switch (value) {
+ case 1: return DELETE_COLUMN_FAMILY_PREPARE;
+ case 2: return DELETE_COLUMN_FAMILY_PRE_OPERATION;
+ case 3: return DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR;
+ case 4: return DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT;
+ case 5: return DELETE_COLUMN_FAMILY_POST_OPERATION;
+ case 6: return DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<DeleteColumnFamilyState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<DeleteColumnFamilyState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<DeleteColumnFamilyState>() {
+ public DeleteColumnFamilyState findValueByNumber(int number) {
+ return DeleteColumnFamilyState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
+ }
+
+ private static final DeleteColumnFamilyState[] VALUES = values();
+
+ public static DeleteColumnFamilyState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private DeleteColumnFamilyState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:DeleteColumnFamilyState)
+ }
+
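Since each generated state enum exposes numeric lookups, persisted ordinals can be decoded straightforwardly; a minimal sketch using only the generated API above:

import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState;

// valueOf(int) returns null for unknown numbers, so callers must guard
// against state values written by a newer protocol version.
AddColumnFamilyState state = AddColumnFamilyState.valueOf(3);
// state == ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; state.getNumber() == 3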
public interface CreateTableStateDataOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -3776,66 +4112,3621 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(class_scope:DeleteTableStateData)
}
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_CreateTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_CreateTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ModifyTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ModifyTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DeleteTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DeleteTableStateData_fieldAccessorTable;
+ public interface AddColumnFamilyStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" +
- "C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
- "_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" +
- "chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" +
- "\030\003 \003(\0132\013.RegionInfo\"\277\001\n\024ModifyTableState" +
- "Data\022#\n\tuser_info\030\001 \002(\0132\020.UserInformatio" +
- "n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
- "leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
- "\014.TableSchema\022&\n\036delete_column_family_in" +
- "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
- "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
- "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
- "o\030\003 \003(\0132\013.RegionInfo*\330\001\n\020CreateTableStat" +
- "e\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034CRE" +
- "ATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TA" +
- "BLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN" +
- "_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_C" +
- "ACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020\006*" +
- "\207\002\n\020ModifyTableState\022\030\n\024MODIFY_TABLE_PRE" +
- "PARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(",
- "\n$MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003" +
- "\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_COLUMN\020\004" +
- "\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033M" +
- "ODIFY_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY_T" +
- "ABLE_REOPEN_ALL_REGIONS\020\007*\337\001\n\020DeleteTabl" +
- "eState\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!" +
- "\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DEL" +
- "ETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TA" +
- "BLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_" +
- "UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_",
- "OPERATION\020\006BK\n*org.apache.hadoop.hbase.p" +
- "rotobuf.generatedB\025MasterProcedureProtos" +
- "H\001\210\001\001\240\001\001"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required .ColumnFamilySchema columnfamily_schema = 3;
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ boolean hasColumnfamilySchema();
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema();
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder();
+
+ // optional .TableSchema unmodified_table_schema = 4;
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ boolean hasUnmodifiedTableSchema();
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema();
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder();
+ }
+ /**
+ * Protobuf type {@code AddColumnFamilyStateData}
+ */
+ public static final class AddColumnFamilyStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements AddColumnFamilyStateDataOrBuilder {
+ // Use AddColumnFamilyStateData.newBuilder() to construct.
+ private AddColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private AddColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final AddColumnFamilyStateData defaultInstance;
+ public static AddColumnFamilyStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public AddColumnFamilyStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AddColumnFamilyStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = columnfamilySchema_.toBuilder();
+ }
+ columnfamilySchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(columnfamilySchema_);
+ columnfamilySchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 34: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = unmodifiedTableSchema_.toBuilder();
+ }
+ unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(unmodifiedTableSchema_);
+ unmodifiedTableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<AddColumnFamilyStateData> PARSER =
+ new com.google.protobuf.AbstractParser<AddColumnFamilyStateData>() {
+ public AddColumnFamilyStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new AddColumnFamilyStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<AddColumnFamilyStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required .ColumnFamilySchema columnfamily_schema = 3;
+ public static final int COLUMNFAMILY_SCHEMA_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_;
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public boolean hasColumnfamilySchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() {
+ return columnfamilySchema_;
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() {
+ return columnfamilySchema_;
+ }
+
+ // optional .TableSchema unmodified_table_schema = 4;
+ public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_;
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public boolean hasUnmodifiedTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ return unmodifiedTableSchema_;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ return unmodifiedTableSchema_;
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasColumnfamilySchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getColumnfamilySchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, columnfamilySchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(4, unmodifiedTableSchema_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, columnfamilySchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, unmodifiedTableSchema_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasColumnfamilySchema() == other.hasColumnfamilySchema());
+ if (hasColumnfamilySchema()) {
+ result = result && getColumnfamilySchema()
+ .equals(other.getColumnfamilySchema());
+ }
+ result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema());
+ if (hasUnmodifiedTableSchema()) {
+ result = result && getUnmodifiedTableSchema()
+ .equals(other.getUnmodifiedTableSchema());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasColumnfamilySchema()) {
+ hash = (37 * hash) + COLUMNFAMILY_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getColumnfamilySchema().hashCode();
+ }
+ if (hasUnmodifiedTableSchema()) {
+ hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getUnmodifiedTableSchema().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code AddColumnFamilyStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ getColumnfamilySchemaFieldBuilder();
+ getUnmodifiedTableSchemaFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (columnfamilySchemaBuilder_ == null) {
+ columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
+ } else {
+ columnfamilySchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ unmodifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (columnfamilySchemaBuilder_ == null) {
+ result.columnfamilySchema_ = columnfamilySchema_;
+ } else {
+ result.columnfamilySchema_ = columnfamilySchemaBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ result.unmodifiedTableSchema_ = unmodifiedTableSchema_;
+ } else {
+ result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasColumnfamilySchema()) {
+ mergeColumnfamilySchema(other.getColumnfamilySchema());
+ }
+ if (other.hasUnmodifiedTableSchema()) {
+ mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasColumnfamilySchema()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ if (!getColumnfamilySchema().isInitialized()) {
+
+ return false;
+ }
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required .ColumnFamilySchema columnfamily_schema = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnfamilySchemaBuilder_;
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public boolean hasColumnfamilySchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() {
+ if (columnfamilySchemaBuilder_ == null) {
+ return columnfamilySchema_;
+ } else {
+ return columnfamilySchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public Builder setColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) {
+ if (columnfamilySchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ columnfamilySchema_ = value;
+ onChanged();
+ } else {
+ columnfamilySchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public Builder setColumnfamilySchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) {
+ if (columnfamilySchemaBuilder_ == null) {
+ columnfamilySchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ columnfamilySchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public Builder mergeColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) {
+ if (columnfamilySchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ columnfamilySchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) {
+ columnfamilySchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(columnfamilySchema_).mergeFrom(value).buildPartial();
+ } else {
+ columnfamilySchema_ = value;
+ }
+ onChanged();
+ } else {
+ columnfamilySchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public Builder clearColumnfamilySchema() {
+ if (columnfamilySchemaBuilder_ == null) {
+ columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
+ onChanged();
+ } else {
+ columnfamilySchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnfamilySchemaBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getColumnfamilySchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() {
+ if (columnfamilySchemaBuilder_ != null) {
+ return columnfamilySchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return columnfamilySchema_;
+ }
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+ getColumnfamilySchemaFieldBuilder() {
+ if (columnfamilySchemaBuilder_ == null) {
+ columnfamilySchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>(
+ columnfamilySchema_,
+ getParentForChildren(),
+ isClean());
+ columnfamilySchema_ = null;
+ }
+ return columnfamilySchemaBuilder_;
+ }
+
+ // optional .TableSchema unmodified_table_schema = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_;
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public boolean hasUnmodifiedTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ return unmodifiedTableSchema_;
+ } else {
+ return unmodifiedTableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ unmodifiedTableSchema_ = value;
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public Builder setUnmodifiedTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ unmodifiedTableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ unmodifiedTableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public Builder clearUnmodifiedTableSchema() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ unmodifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() {
+ bitField0_ |= 0x00000008;
+ onChanged();
+ return getUnmodifiedTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ if (unmodifiedTableSchemaBuilder_ != null) {
+ return unmodifiedTableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return unmodifiedTableSchema_;
+ }
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getUnmodifiedTableSchemaFieldBuilder() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ unmodifiedTableSchema_,
+ getParentForChildren(),
+ isClean());
+ unmodifiedTableSchema_ = null;
+ }
+ return unmodifiedTableSchemaBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:AddColumnFamilyStateData)
+ }
+
+ static {
+ defaultInstance = new AddColumnFamilyStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:AddColumnFamilyStateData)
+ }
+
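For reference, a minimal usage sketch of the generated builder API above. This is an editorial sketch, not part of the patch: it assumes the generated classes are imported, and it assumes UserInformation.newBuilder()/setEffectiveUser from RPC.proto (effective_user being its only required field). buildPartial() is used because the other required fields are deliberately left unset.

    // Populate only user_info; table_name and columnfamily_schema stay unset.
    MasterProcedureProtos.AddColumnFamilyStateData.Builder builder =
        MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
            .setUserInfo(RPCProtos.UserInformation.newBuilder()
                .setEffectiveUser("hbase")  // assumed setter from RPC.proto
                .build());
    // build() would throw an UninitializedMessageException here, since two
    // required fields are missing; buildPartial() does not check them.
    MasterProcedureProtos.AddColumnFamilyStateData partial = builder.buildPartial();
    assert !partial.isInitialized();  // table_name / columnfamily_schema unset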
+ public interface ModifyColumnFamilyStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required .ColumnFamilySchema columnfamily_schema = 3;
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ boolean hasColumnfamilySchema();
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema();
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder();
+
+ // optional .TableSchema unmodified_table_schema = 4;
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ boolean hasUnmodifiedTableSchema();
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema();
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder();
+ }
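The OrBuilder interface above exists so read-only code can accept either a finished message or a live Builder; a small sketch (the helper name is hypothetical):

    // Accepts both ModifyColumnFamilyStateData and its Builder.
    static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableNameOf(
        org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateDataOrBuilder m) {
      // hasTableName() checks the presence bit before reading the required field.
      return m.hasTableName() ? m.getTableName() : null;
    }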
+ /**
+ * Protobuf type {@code ModifyColumnFamilyStateData}
+ */
+ public static final class ModifyColumnFamilyStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements ModifyColumnFamilyStateDataOrBuilder {
+ // Use ModifyColumnFamilyStateData.newBuilder() to construct.
+ private ModifyColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private ModifyColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final ModifyColumnFamilyStateData defaultInstance;
+ public static ModifyColumnFamilyStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ModifyColumnFamilyStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private ModifyColumnFamilyStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = columnfamilySchema_.toBuilder();
+ }
+ columnfamilySchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(columnfamilySchema_);
+ columnfamilySchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 34: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = unmodifiedTableSchema_.toBuilder();
+ }
+ unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(unmodifiedTableSchema_);
+ unmodifiedTableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<ModifyColumnFamilyStateData> PARSER =
+ new com.google.protobuf.AbstractParser<ModifyColumnFamilyStateData>() {
+ public ModifyColumnFamilyStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new ModifyColumnFamilyStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<ModifyColumnFamilyStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required .ColumnFamilySchema columnfamily_schema = 3;
+ public static final int COLUMNFAMILY_SCHEMA_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_;
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public boolean hasColumnfamilySchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() {
+ return columnfamilySchema_;
+ }
+ /**
+ * <code>required .ColumnFamilySchema columnfamily_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() {
+ return columnfamilySchema_;
+ }
+
+ // optional .TableSchema unmodified_table_schema = 4;
+ public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_;
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public boolean hasUnmodifiedTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ return unmodifiedTableSchema_;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ return unmodifiedTableSchema_;
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasColumnfamilySchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getColumnfamilySchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, columnfamilySchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(4, unmodifiedTableSchema_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, columnfamilySchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, unmodifiedTableSchema_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasColumnfamilySchema() == other.hasColumnfamilySchema());
+ if (hasColumnfamilySchema()) {
+ result = result && getColumnfamilySchema()
+ .equals(other.getColumnfamilySchema());
+ }
+ result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema());
+ if (hasUnmodifiedTableSchema()) {
+ result = result && getUnmodifiedTableSchema()
+ .equals(other.getUnmodifiedTableSchema());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasColumnfamilySchema()) {
+ hash = (37 * hash) + COLUMNFAMILY_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getColumnfamilySchema().hashCode();
+ }
+ if (hasUnmodifiedTableSchema()) {
+ hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getUnmodifiedTableSchema().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.
<TRUNCATED>
[11/50] [abbrv] hbase git commit: HBASE-13203 Procedure v2 - master create/delete table
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b5f1f98a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
new file mode 100644
index 0000000..6d1694a
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -0,0 +1,2633 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: MasterProcedure.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class MasterProcedureProtos {
+ private MasterProcedureProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ /**
+ * Protobuf enum {@code CreateTableState}
+ */
+ public enum CreateTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>CREATE_TABLE_PRE_OPERATION = 1;</code>
+ */
+ CREATE_TABLE_PRE_OPERATION(0, 1),
+ /**
+ * <code>CREATE_TABLE_WRITE_FS_LAYOUT = 2;</code>
+ */
+ CREATE_TABLE_WRITE_FS_LAYOUT(1, 2),
+ /**
+ * <code>CREATE_TABLE_ADD_TO_META = 3;</code>
+ */
+ CREATE_TABLE_ADD_TO_META(2, 3),
+ /**
+ * <code>CREATE_TABLE_ASSIGN_REGIONS = 4;</code>
+ */
+ CREATE_TABLE_ASSIGN_REGIONS(3, 4),
+ /**
+ * <code>CREATE_TABLE_UPDATE_DESC_CACHE = 5;</code>
+ */
+ CREATE_TABLE_UPDATE_DESC_CACHE(4, 5),
+ /**
+ * <code>CREATE_TABLE_POST_OPERATION = 6;</code>
+ */
+ CREATE_TABLE_POST_OPERATION(5, 6),
+ ;
+
+ /**
+ * <code>CREATE_TABLE_PRE_OPERATION = 1;</code>
+ */
+ public static final int CREATE_TABLE_PRE_OPERATION_VALUE = 1;
+ /**
+ * <code>CREATE_TABLE_WRITE_FS_LAYOUT = 2;</code>
+ */
+ public static final int CREATE_TABLE_WRITE_FS_LAYOUT_VALUE = 2;
+ /**
+ * <code>CREATE_TABLE_ADD_TO_META = 3;</code>
+ */
+ public static final int CREATE_TABLE_ADD_TO_META_VALUE = 3;
+ /**
+ * <code>CREATE_TABLE_ASSIGN_REGIONS = 4;</code>
+ */
+ public static final int CREATE_TABLE_ASSIGN_REGIONS_VALUE = 4;
+ /**
+ * <code>CREATE_TABLE_UPDATE_DESC_CACHE = 5;</code>
+ */
+ public static final int CREATE_TABLE_UPDATE_DESC_CACHE_VALUE = 5;
+ /**
+ * <code>CREATE_TABLE_POST_OPERATION = 6;</code>
+ */
+ public static final int CREATE_TABLE_POST_OPERATION_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static CreateTableState valueOf(int value) {
+ switch (value) {
+ case 1: return CREATE_TABLE_PRE_OPERATION;
+ case 2: return CREATE_TABLE_WRITE_FS_LAYOUT;
+ case 3: return CREATE_TABLE_ADD_TO_META;
+ case 4: return CREATE_TABLE_ASSIGN_REGIONS;
+ case 5: return CREATE_TABLE_UPDATE_DESC_CACHE;
+ case 6: return CREATE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<CreateTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<CreateTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<CreateTableState>() {
+ public CreateTableState findValueByNumber(int number) {
+ return CreateTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final CreateTableState[] VALUES = values();
+
+ public static CreateTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private CreateTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:CreateTableState)
+ }
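A brief sketch of the wire-value round trip this generated enum supports, using only the valueOf(int) and getNumber() methods defined above:

    // Map a persisted ordinal back to a constant; unknown numbers yield null.
    MasterProcedureProtos.CreateTableState state =
        MasterProcedureProtos.CreateTableState.valueOf(3);
    // state == CREATE_TABLE_ADD_TO_META and state.getNumber() == 3;
    // valueOf(7) would return null, since no value 7 is declared.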
+
+ /**
+ * Protobuf enum {@code DeleteTableState}
+ */
+ public enum DeleteTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>DELETE_TABLE_PRE_OPERATION = 1;</code>
+ */
+ DELETE_TABLE_PRE_OPERATION(0, 1),
+ /**
+ * <code>DELETE_TABLE_REMOVE_FROM_META = 2;</code>
+ */
+ DELETE_TABLE_REMOVE_FROM_META(1, 2),
+ /**
+ * <code>DELETE_TABLE_CLEAR_FS_LAYOUT = 3;</code>
+ */
+ DELETE_TABLE_CLEAR_FS_LAYOUT(2, 3),
+ /**
+ * <code>DELETE_TABLE_UPDATE_DESC_CACHE = 4;</code>
+ */
+ DELETE_TABLE_UPDATE_DESC_CACHE(3, 4),
+ /**
+ * <code>DELETE_TABLE_UNASSIGN_REGIONS = 5;</code>
+ */
+ DELETE_TABLE_UNASSIGN_REGIONS(4, 5),
+ /**
+ * <code>DELETE_TABLE_POST_OPERATION = 6;</code>
+ */
+ DELETE_TABLE_POST_OPERATION(5, 6),
+ ;
+
+ /**
+ * <code>DELETE_TABLE_PRE_OPERATION = 1;</code>
+ */
+ public static final int DELETE_TABLE_PRE_OPERATION_VALUE = 1;
+ /**
+ * <code>DELETE_TABLE_REMOVE_FROM_META = 2;</code>
+ */
+ public static final int DELETE_TABLE_REMOVE_FROM_META_VALUE = 2;
+ /**
+ * <code>DELETE_TABLE_CLEAR_FS_LAYOUT = 3;</code>
+ */
+ public static final int DELETE_TABLE_CLEAR_FS_LAYOUT_VALUE = 3;
+ /**
+ * <code>DELETE_TABLE_UPDATE_DESC_CACHE = 4;</code>
+ */
+ public static final int DELETE_TABLE_UPDATE_DESC_CACHE_VALUE = 4;
+ /**
+ * <code>DELETE_TABLE_UNASSIGN_REGIONS = 5;</code>
+ */
+ public static final int DELETE_TABLE_UNASSIGN_REGIONS_VALUE = 5;
+ /**
+ * <code>DELETE_TABLE_POST_OPERATION = 6;</code>
+ */
+ public static final int DELETE_TABLE_POST_OPERATION_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static DeleteTableState valueOf(int value) {
+ switch (value) {
+ case 1: return DELETE_TABLE_PRE_OPERATION;
+ case 2: return DELETE_TABLE_REMOVE_FROM_META;
+ case 3: return DELETE_TABLE_CLEAR_FS_LAYOUT;
+ case 4: return DELETE_TABLE_UPDATE_DESC_CACHE;
+ case 5: return DELETE_TABLE_UNASSIGN_REGIONS;
+ case 6: return DELETE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<DeleteTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<DeleteTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<DeleteTableState>() {
+ public DeleteTableState findValueByNumber(int number) {
+ return DeleteTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(1);
+ }
+
+ private static final DeleteTableState[] VALUES = values();
+
+ public static DeleteTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private DeleteTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:DeleteTableState)
+ }
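The descriptor-based valueOf overload above also supports reflective lookups; a short sketch, assuming the standard com.google.protobuf descriptor API:

    // Resolve a state from its EnumValueDescriptor, e.g. when walking
    // descriptors reflectively rather than reading a numeric wire value.
    com.google.protobuf.Descriptors.EnumValueDescriptor desc =
        MasterProcedureProtos.DeleteTableState.getDescriptor()
            .findValueByName("DELETE_TABLE_PRE_OPERATION");
    MasterProcedureProtos.DeleteTableState state =
        MasterProcedureProtos.DeleteTableState.valueOf(desc);  // == DELETE_TABLE_PRE_OPERATION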
+
+ public interface CreateTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableSchema table_schema = 2;
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ boolean hasTableSchema();
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema();
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder();
+
+ // repeated .RegionInfo region_info = 3;
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ int getRegionInfoCount();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
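The repeated region_info field adds list-style accessors to the interface above; a minimal sketch of iterating them, where data stands for any CreateTableStateDataOrBuilder:

    // Index-based access; getRegionInfoList() exposes the same data as a List.
    for (int i = 0; i < data.getRegionInfoCount(); i++) {
      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo ri =
          data.getRegionInfo(i);
      // ... use ri ...
    }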
+ /**
+ * Protobuf type {@code CreateTableStateData}
+ */
+ public static final class CreateTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements CreateTableStateDataOrBuilder {
+ // Use CreateTableStateData.newBuilder() to construct.
+ private CreateTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CreateTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CreateTableStateData defaultInstance;
+ public static CreateTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CreateTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CreateTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableSchema_.toBuilder();
+ }
+ tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableSchema_);
+ tableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<CreateTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<CreateTableStateData>() {
+ public CreateTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CreateTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CreateTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableSchema table_schema = 2;
+ public static final int TABLE_SCHEMA_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_;
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ return tableSchema_;
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ return tableSchema_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ public static final int REGION_INFO_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableSchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(3, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
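+ // Note: getSerializedSize() is memoized, and writeTo() above calls it
+ // first on purpose: computing the size also caches the sizes of the
+ // nested messages, which writeMessage() needs because every embedded
+ // message is length-delimited on the wire.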
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableSchema() == other.hasTableSchema());
+ if (hasTableSchema()) {
+ result = result && getTableSchema()
+ .equals(other.getTableSchema());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableSchema()) {
+ hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getTableSchema().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
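+ // Note: hashCode() is memoized with 0 as the "not computed" sentinel;
+ // a message whose fields happen to hash to 0 is simply rehashed on
+ // every call, which is correct, just not cached.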
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CreateTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableSchemaFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableSchemaBuilder_ == null) {
+ result.tableSchema_ = tableSchema_;
+ } else {
+ result.tableSchema_ = tableSchemaBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
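+ // Note: buildPartial() copies the builder's has-bits into the message
+ // (to_bitField0_), takes either the raw field or the nested builder's
+ // build() result for user_info/table_schema, and freezes region_info
+ // with Collections.unmodifiableList() so the built message is immutable.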
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableSchema()) {
+ mergeTableSchema(other.getTableSchema());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
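+ // Note on merge semantics: the singular user_info and table_schema
+ // fields are merged recursively via mergeUserInfo()/mergeTableSchema(),
+ // while the repeated region_info entries from "other" are appended to
+ // the existing list rather than replacing it.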
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+ return false;
+ }
+ if (!hasTableSchema()) {
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
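+ // Note: on a parse failure, the finally block still merges whatever
+ // partial message was decoded before the exception is rethrown, so the
+ // builder keeps the fields that did arrive intact.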
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableSchema table_schema = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ return tableSchema_;
+ } else {
+ return tableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableSchema_ = value;
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public Builder setTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ tableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ tableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ tableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public Builder clearTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ if (tableSchemaBuilder_ != null) {
+ return tableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return tableSchema_;
+ }
+ }
+ /**
+ * <code>required .TableSchema table_schema = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableSchemaFieldBuilder() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ tableSchema_,
+ getParentForChildren(),
+ isClean());
+ tableSchema_ = null;
+ }
+ return tableSchemaBuilder_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000004;
+ }
+ }
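+ // Note: the builder holds region_info as an immutable (possibly
+ // shared) list until the first mutation; the 0x00000004 bit records
+ // that the list has been copied into a private ArrayList and may be
+ // modified in place.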
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
+ } else {
+ regionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
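+ // Note: the RepeatedFieldBuilder is created lazily on first use; once
+ // it exists it takes ownership of the region_info elements (the local
+ // regionInfo_ list is nulled out) and all of the accessors above
+ // delegate to it.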
+
+ // @@protoc_insertion_point(builder_scope:CreateTableStateData)
+ }
+
+ static {
+ defaultInstance = new CreateTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CreateTableStateData)
+ }
+
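+ // Usage sketch (illustrative only; protoc does not emit this, and the
+ // userInfo/schema/regions locals are assumed to be populated): a
+ // procedure would round-trip this state message roughly as
+ //
+ //   CreateTableStateData state = CreateTableStateData.newBuilder()
+ //       .setUserInfo(userInfo)
+ //       .setTableSchema(schema)
+ //       .addAllRegionInfo(regions)
+ //       .build();
+ //   byte[] bytes = state.toByteArray();
+ //   CreateTableStateData copy = CreateTableStateData.parseFrom(bytes);
+ //
+ // build() throws if the required user_info or table_schema field is
+ // unset, which is exactly the isInitialized() check shown earlier.
+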
+ public interface DeleteTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // repeated .RegionInfo region_info = 3;
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ int getRegionInfoCount();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code DeleteTableStateData}
+ */
+ public static final class DeleteTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements DeleteTableStateDataOrBuilder {
+ // Use DeleteTableStateData.newBuilder() to construct.
+ private DeleteTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DeleteTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DeleteTableStateData defaultInstance;
+ public static DeleteTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DeleteTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DeleteTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
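+ // Note on the wire parsing above: each case label is a raw tag value,
+ // (field_number << 3) | wire_type, so 10, 18 and 26 are fields 1, 2
+ // and 3 with wire type 2 (length-delimited). Tag 0, or an unknown
+ // field that parseUnknownField() cannot skip, ends the read loop.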
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DeleteTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<DeleteTableStateData>() {
+ public DeleteTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DeleteTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DeleteTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ public static final int REGION_INFO_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(3, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code DeleteTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+ return false;
+ }
+ if (!hasTableName()) {
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * <code>required .TableName table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /
<TRUNCATED>
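The fragment above is stock protobuf-2.5 generated builder code for a message carrying a
required UserInformation field; the enclosing message type itself is lost to the truncation.
A minimal sketch of how caller code typically drives these generated accessors (the
requestBuilder named in the comments is hypothetical, standing in for the truncated type):

import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;

public final class UserInfoSketch {
  static UserInformation exampleUserInfo() {
    // effective_user is UserInformation's required string field.
    return UserInformation.newBuilder()
        .setEffectiveUser("alice") // illustrative value only
        .build();
  }
  // A caller would hand the message to the truncated enclosing builder as field 1:
  //   requestBuilder.setUserInfo(exampleUserInfo());   // replaces any prior value
  //   requestBuilder.mergeUserInfo(exampleUserInfo()); // field-wise merge, as in mergeUserInfo above
  //   requestBuilder.clearUserInfo();                  // resets to getDefaultInstance()
}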
[19/50] [abbrv] hbase git commit: HBASE-13290 Procedure v2 - client
enable/disable table sync (Stephen Yuan Jiang)
Posted by jm...@apache.org.
HBASE-13290 Procedure v2 - client enable/disable table sync (Stephen Yuan Jiang)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f6512065
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f6512065
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f6512065
Branch: refs/heads/hbase-11339
Commit: f6512065c2ee84a9c5af7d03f674c57c978b0b68
Parents: 57c70f0
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 21:53:51 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:43 2015 +0100
----------------------------------------------------------------------
.../apache/hadoop/hbase/client/HBaseAdmin.java | 230 ++++++--
.../hbase/protobuf/generated/MasterProtos.java | 583 ++++++++++++-------
hbase-protocol/src/main/protobuf/Master.proto | 2 +
.../org/apache/hadoop/hbase/master/HMaster.java | 10 +-
.../hadoop/hbase/master/MasterRpcServices.java | 8 +-
.../hadoop/hbase/master/MasterServices.java | 4 +-
.../hadoop/hbase/master/TestCatalogJanitor.java | 8 +-
7 files changed, 586 insertions(+), 259 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 7882737..efbc7d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -101,8 +101,10 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotReq
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
@@ -944,12 +946,20 @@ public class HBaseAdmin implements Admin {
@Override
public void enableTable(final TableName tableName)
throws IOException {
- enableTableAsync(tableName);
-
- // Wait until all regions are enabled
- waitUntilTableIsEnabled(tableName);
-
- LOG.info("Enabled table " + tableName);
+ Future<Void> future = enableTableAsyncV2(tableName);
+ try {
+ future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
+ } catch (TimeoutException e) {
+ throw new TimeoutIOException(e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
+ } else {
+ throw new IOException(e.getCause());
+ }
+ }
}
public void enableTable(final byte[] tableName)
@@ -1016,16 +1026,7 @@ public class HBaseAdmin implements Admin {
@Override
public void enableTableAsync(final TableName tableName)
throws IOException {
- TableName.isLegalFullyQualifiedTableName(tableName.getName());
- executeCallable(new MasterCallable<Void>(getConnection()) {
- @Override
- public Void call(int callTimeout) throws ServiceException {
- LOG.info("Started enable of " + tableName);
- EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
- master.enableTable(null,req);
- return null;
- }
- });
+ enableTableAsyncV2(tableName);
}
public void enableTableAsync(final byte[] tableName)
@@ -1039,6 +1040,84 @@ public class HBaseAdmin implements Admin {
}
/**
+ * Enables the table but does not block and wait for it to be completely enabled.
+ * You can use Future.get(long, TimeUnit) to wait for the operation to complete.
+ * It may throw an ExecutionException if there was an error while executing the
+ * operation, or a TimeoutException if the wait timeout was not long enough to
+ * allow the operation to complete.
+ *
+ * @param tableName name of table to enable
+ * @throws IOException if a remote or network exception occurs
+ * @return the result of the async enable. You can use Future.get(long, TimeUnit)
+ * to wait for the operation to complete.
+ */
+ // TODO: This should be called enableTableAsync, but renaming it would break binary compatibility
+ private Future<Void> enableTableAsyncV2(final TableName tableName) throws IOException {
+ TableName.isLegalFullyQualifiedTableName(tableName.getName());
+ EnableTableResponse response = executeCallable(
+ new MasterCallable<EnableTableResponse>(getConnection()) {
+ @Override
+ public EnableTableResponse call(int callTimeout) throws ServiceException {
+ LOG.info("Started enable of " + tableName);
+ EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
+ return master.enableTable(null,req);
+ }
+ });
+ return new EnableTableFuture(this, tableName, response);
+ }
+
+ private static class EnableTableFuture extends ProcedureFuture<Void> {
+ private final TableName tableName;
+
+ public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
+ final EnableTableResponse response) {
+ super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
+ this.tableName = tableName;
+ }
+
+ @Override
+ protected Void waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitTableEnabled(deadlineTs);
+ return null;
+ }
+
+ @Override
+ protected Void postOperationResult(final Void result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ LOG.info("Enabled " + tableName);
+ return result;
+ }
+
+ private void waitTableEnabled(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitForState(deadlineTs, new WaitForStateCallable() {
+ @Override
+ public boolean checkState(int tries) throws IOException {
+ boolean enabled;
+ try {
+ enabled = getAdmin().isTableEnabled(tableName);
+ } catch (TableNotFoundException tnfe) {
+ return false;
+ }
+ return enabled && getAdmin().isTableAvailable(tableName);
+ }
+
+ @Override
+ public void throwInterruptedException() throws InterruptedIOException {
+ throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
+ }
+
+ @Override
+ public void throwTimeoutException(long elapsedTime) throws TimeoutException {
+ throw new TimeoutException("Table " + tableName + " not yet enabled after " +
+ elapsedTime + "msec");
+ }
+ });
+ }
+ }
+
+ /**
* Enable tables matching the passed in pattern and wait on completion.
*
* Warning: Use this method carefully, there is no prompting and the effect is
@@ -1096,16 +1175,7 @@ public class HBaseAdmin implements Admin {
*/
@Override
public void disableTableAsync(final TableName tableName) throws IOException {
- TableName.isLegalFullyQualifiedTableName(tableName.getName());
- executeCallable(new MasterCallable<Void>(getConnection()) {
- @Override
- public Void call(int callTimeout) throws ServiceException {
- LOG.info("Started disable of " + tableName);
- DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
- master.disableTable(null,req);
- return null;
- }
- });
+ disableTableAsyncV2(tableName);
}
public void disableTableAsync(final byte[] tableName) throws IOException {
@@ -1130,32 +1200,20 @@ public class HBaseAdmin implements Admin {
@Override
public void disableTable(final TableName tableName)
throws IOException {
- disableTableAsync(tableName);
- // Wait until table is disabled
- boolean disabled = false;
- for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
- disabled = isTableDisabled(tableName);
- if (disabled) {
- break;
- }
- long sleep = getPauseTime(tries);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
- "disabled in " + tableName);
- }
- try {
- Thread.sleep(sleep);
- } catch (InterruptedException e) {
- // Do this conversion rather than let it out because do not want to
- // change the method signature.
- throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
+ Future<Void> future = disableTableAsyncV2(tableName);
+ try {
+ future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
+ } catch (TimeoutException e) {
+ throw new TimeoutIOException(e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
+ } else {
+ throw new IOException(e.getCause());
}
}
- if (!disabled) {
- throw new RegionException("Retries exhausted, it took too long to wait"+
- " for the table " + tableName + " to be disabled.");
- }
- LOG.info("Disabled " + tableName);
}
public void disableTable(final byte[] tableName)
@@ -1169,6 +1227,78 @@ public class HBaseAdmin implements Admin {
}
/**
+ * Disables the table but does not block and wait for it to be completely disabled.
+ * You can use Future.get(long, TimeUnit) to wait for the operation to complete.
+ * It may throw an ExecutionException if there was an error while executing the
+ * operation, or a TimeoutException if the wait timeout was not long enough to
+ * allow the operation to complete.
+ *
+ * @param tableName name of table to disable
+ * @throws IOException if a remote or network exception occurs
+ * @return the result of the async disable. You can use Future.get(long, TimeUnit)
+ * to wait for the operation to complete.
+ */
+ // TODO: This should be called disableTableAsync, but renaming it would break binary compatibility
+ private Future<Void> disableTableAsyncV2(final TableName tableName) throws IOException {
+ TableName.isLegalFullyQualifiedTableName(tableName.getName());
+ DisableTableResponse response = executeCallable(
+ new MasterCallable<DisableTableResponse>(getConnection()) {
+ @Override
+ public DisableTableResponse call(int callTimeout) throws ServiceException {
+ LOG.info("Started disable of " + tableName);
+ DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
+ return master.disableTable(null, req);
+ }
+ });
+ return new DisableTableFuture(this, tableName, response);
+ }
+
+ private static class DisableTableFuture extends ProcedureFuture<Void> {
+ private final TableName tableName;
+
+ public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
+ final DisableTableResponse response) {
+ super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
+ this.tableName = tableName;
+ }
+
+ @Override
+ protected Void waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitTableDisabled(deadlineTs);
+ return null;
+ }
+
+ @Override
+ protected Void postOperationResult(final Void result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ LOG.info("Disabled " + tableName);
+ return result;
+ }
+
+ private void waitTableDisabled(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitForState(deadlineTs, new WaitForStateCallable() {
+ @Override
+ public boolean checkState(int tries) throws IOException {
+ return getAdmin().isTableDisabled(tableName);
+ }
+
+ @Override
+ public void throwInterruptedException() throws InterruptedIOException {
+ throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
+ }
+
+ @Override
+ public void throwTimeoutException(long elapsedTime) throws TimeoutException {
+ throw new TimeoutException("Table " + tableName + " not yet disabled after " +
+ elapsedTime + "msec");
+ }
+ });
+ }
+ }
+
+ /**
* Disable tables matching the passed in pattern and wait on completion.
*
* Warning: Use this method carefully, there is no prompting and the effect is
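Taken together, the HBaseAdmin changes above turn enableTable and disableTable into
synchronous wrappers over a procedure-backed Future. A minimal sketch of the resulting
client-side behavior, assuming a running cluster and an existing table (the table name
and the enable/disable flow are illustrative, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class EnableTableClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("example");
      // Blocks until the enable procedure completes or syncWaitTimeout elapses;
      // per the patch, a TimeoutIOException surfaces when the deadline passes.
      admin.enableTable(table);
      // The async variant returns immediately; the procedure runs on the master.
      admin.disableTableAsync(table);
    }
  }
}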
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index cc6f201..463f82f 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -12053,6 +12053,16 @@ public final class MasterProtos {
public interface EnableTableResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // optional uint64 proc_id = 1;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ long getProcId();
}
/**
* Protobuf type {@code EnableTableResponse}
@@ -12087,6 +12097,7 @@ public final class MasterProtos {
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
@@ -12104,6 +12115,11 @@ public final class MasterProtos {
}
break;
}
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -12143,7 +12159,25 @@ public final class MasterProtos {
return PARSER;
}
+ private int bitField0_;
+ // optional uint64 proc_id = 1;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
private void initFields() {
+ procId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -12157,6 +12191,9 @@ public final class MasterProtos {
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -12166,6 +12203,10 @@ public final class MasterProtos {
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -12189,6 +12230,11 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse) obj;
boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -12202,6 +12248,10 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -12311,6 +12361,8 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@@ -12337,6 +12389,13 @@ public final class MasterProtos {
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.procId_ = procId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -12352,6 +12411,9 @@ public final class MasterProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance()) return this;
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -12377,6 +12439,40 @@ public final class MasterProtos {
}
return this;
}
+ private int bitField0_;
+
+ // optional uint64 proc_id = 1;
+ private long procId_ ;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:EnableTableResponse)
}
@@ -12952,6 +13048,16 @@ public final class MasterProtos {
public interface DisableTableResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // optional uint64 proc_id = 1;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ long getProcId();
}
/**
* Protobuf type {@code DisableTableResponse}
@@ -12986,6 +13092,7 @@ public final class MasterProtos {
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
@@ -13003,6 +13110,11 @@ public final class MasterProtos {
}
break;
}
+ case 8: {
+ bitField0_ |= 0x00000001;
+ procId_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -13042,7 +13154,25 @@ public final class MasterProtos {
return PARSER;
}
+ private int bitField0_;
+ // optional uint64 proc_id = 1;
+ public static final int PROC_ID_FIELD_NUMBER = 1;
+ private long procId_;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
private void initFields() {
+ procId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -13056,6 +13186,9 @@ public final class MasterProtos {
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, procId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -13065,6 +13198,10 @@ public final class MasterProtos {
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, procId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -13088,6 +13225,11 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse) obj;
boolean result = true;
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -13101,6 +13243,10 @@ public final class MasterProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -13210,6 +13356,8 @@ public final class MasterProtos {
public Builder clear() {
super.clear();
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@@ -13236,6 +13384,13 @@ public final class MasterProtos {
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.procId_ = procId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@@ -13251,6 +13406,9 @@ public final class MasterProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance()) return this;
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -13276,6 +13434,40 @@ public final class MasterProtos {
}
return this;
}
+ private int bitField0_;
+
+ // optional uint64 proc_id = 1;
+ private long procId_ ;
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000001;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 proc_id = 1;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:DisableTableResponse)
}
@@ -52960,199 +53152,200 @@ public final class MasterProtos {
"e\030\001 \002(\0132\n.TableName\022\035\n\016preserveSplits\030\002 " +
"\001(\010:\005false\"\027\n\025TruncateTableResponse\"4\n\022E" +
"nableTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n." +
- "TableName\"\025\n\023EnableTableResponse\"5\n\023Disa" +
- "bleTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" +
- "bleName\"\026\n\024DisableTableResponse\"X\n\022Modif" +
- "yTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Tabl" +
- "eName\022\"\n\014table_schema\030\002 \002(\0132\014.TableSchem" +
- "a\"\025\n\023ModifyTableResponse\"K\n\026CreateNamesp",
- "aceRequest\0221\n\023namespaceDescriptor\030\001 \002(\0132" +
- "\024.NamespaceDescriptor\"\031\n\027CreateNamespace" +
- "Response\"/\n\026DeleteNamespaceRequest\022\025\n\rna" +
- "mespaceName\030\001 \002(\t\"\031\n\027DeleteNamespaceResp" +
- "onse\"K\n\026ModifyNamespaceRequest\0221\n\023namesp" +
- "aceDescriptor\030\001 \002(\0132\024.NamespaceDescripto" +
- "r\"\031\n\027ModifyNamespaceResponse\"6\n\035GetNames" +
- "paceDescriptorRequest\022\025\n\rnamespaceName\030\001" +
- " \002(\t\"S\n\036GetNamespaceDescriptorResponse\0221" +
- "\n\023namespaceDescriptor\030\001 \002(\0132\024.NamespaceD",
- "escriptor\"!\n\037ListNamespaceDescriptorsReq" +
- "uest\"U\n ListNamespaceDescriptorsResponse" +
- "\0221\n\023namespaceDescriptor\030\001 \003(\0132\024.Namespac" +
- "eDescriptor\"?\n&ListTableDescriptorsByNam" +
- "espaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"L\n\'" +
- "ListTableDescriptorsByNamespaceResponse\022" +
- "!\n\013tableSchema\030\001 \003(\0132\014.TableSchema\"9\n Li" +
- "stTableNamesByNamespaceRequest\022\025\n\rnamesp" +
- "aceName\030\001 \002(\t\"B\n!ListTableNamesByNamespa" +
- "ceResponse\022\035\n\ttableName\030\001 \003(\0132\n.TableNam",
- "e\"\021\n\017ShutdownRequest\"\022\n\020ShutdownResponse" +
- "\"\023\n\021StopMasterRequest\"\024\n\022StopMasterRespo" +
- "nse\"\020\n\016BalanceRequest\"\'\n\017BalanceResponse" +
- "\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRun" +
- "ningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002" +
- " \001(\010\"8\n\032SetBalancerRunningResponse\022\032\n\022pr" +
- "ev_balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnab" +
- "ledRequest\",\n\031IsBalancerEnabledResponse\022" +
- "\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogScanReques" +
- "t\"-\n\026RunCatalogScanResponse\022\023\n\013scan_resu",
- "lt\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022" +
- "\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJanitorR" +
- "esponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalog" +
- "JanitorEnabledRequest\"0\n\037IsCatalogJanito" +
- "rEnabledResponse\022\r\n\005value\030\001 \002(\010\"9\n\017Snaps" +
- "hotRequest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotD" +
- "escription\",\n\020SnapshotResponse\022\030\n\020expect" +
- "ed_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapshot" +
- "sRequest\"H\n\035GetCompletedSnapshotsRespons" +
- "e\022\'\n\tsnapshots\030\001 \003(\0132\024.SnapshotDescripti",
- "on\"?\n\025DeleteSnapshotRequest\022&\n\010snapshot\030" +
- "\001 \002(\0132\024.SnapshotDescription\"\030\n\026DeleteSna" +
- "pshotResponse\"@\n\026RestoreSnapshotRequest\022" +
- "&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescription\"" +
- "\031\n\027RestoreSnapshotResponse\"?\n\025IsSnapshot" +
- "DoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snapshot" +
- "Description\"U\n\026IsSnapshotDoneResponse\022\023\n" +
- "\004done\030\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.S" +
- "napshotDescription\"F\n\034IsRestoreSnapshotD" +
- "oneRequest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotD",
- "escription\"4\n\035IsRestoreSnapshotDoneRespo" +
- "nse\022\023\n\004done\030\001 \001(\010:\005false\"=\n\033GetSchemaAlt" +
- "erStatusRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" +
- "bleName\"T\n\034GetSchemaAlterStatusResponse\022" +
- "\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtotal_" +
- "regions\030\002 \001(\r\"\202\001\n\032GetTableDescriptorsReq" +
- "uest\022\037\n\013table_names\030\001 \003(\0132\n.TableName\022\r\n" +
- "\005regex\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010" +
- ":\005false\022\021\n\tnamespace\030\004 \001(\t\"A\n\033GetTableDe" +
- "scriptorsResponse\022\"\n\014table_schema\030\001 \003(\0132",
- "\014.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" +
- "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" +
- ":\005false\022\021\n\tnamespace\030\003 \001(\t\"8\n\025GetTableNa" +
- "mesResponse\022\037\n\013table_names\030\001 \003(\0132\n.Table" +
- "Name\"6\n\024GetTableStateRequest\022\036\n\ntable_na" +
- "me\030\001 \002(\0132\n.TableName\"9\n\025GetTableStateRes" +
- "ponse\022 \n\013table_state\030\001 \002(\0132\013.TableState\"" +
- "\031\n\027GetClusterStatusRequest\"B\n\030GetCluster" +
- "StatusResponse\022&\n\016cluster_status\030\001 \002(\0132\016" +
- ".ClusterStatus\"\030\n\026IsMasterRunningRequest",
- "\"4\n\027IsMasterRunningResponse\022\031\n\021is_master" +
- "_running\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(" +
- "\n\tprocedure\030\001 \002(\0132\025.ProcedureDescription" +
- "\"F\n\025ExecProcedureResponse\022\030\n\020expected_ti" +
- "meout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsPr" +
- "ocedureDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025." +
- "ProcedureDescription\"W\n\027IsProcedureDoneR" +
- "esponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot" +
- "\030\002 \001(\0132\025.ProcedureDescription\",\n\031GetProc" +
- "edureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"\347\001\n\032",
- "GetProcedureResultResponse\0220\n\005state\030\001 \002(" +
- "\0162!.GetProcedureResultResponse.State\022\022\n\n" +
- "start_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n" +
- "\006result\030\004 \001(\014\022+\n\texception\030\005 \001(\0132\030.Forei" +
- "gnExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND" +
- "\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"\273\001\n\017SetQuo" +
- "taRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_gro" +
- "up\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_nam" +
- "e\030\004 \001(\0132\n.TableName\022\022\n\nremove_all\030\005 \001(\010\022" +
- "\026\n\016bypass_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(",
- "\0132\020.ThrottleRequest\"\022\n\020SetQuotaResponse\"" +
- "A\n\037MajorCompactionTimestampRequest\022\036\n\nta" +
- "ble_name\030\001 \002(\0132\n.TableName\"L\n(MajorCompa" +
- "ctionTimestampForRegionRequest\022 \n\006region" +
- "\030\001 \002(\0132\020.RegionSpecifier\"@\n MajorCompact" +
- "ionTimestampResponse\022\034\n\024compaction_times" +
- "tamp\030\001 \002(\0032\343\033\n\rMasterService\022S\n\024GetSchem" +
- "aAlterStatus\022\034.GetSchemaAlterStatusReque" +
- "st\032\035.GetSchemaAlterStatusResponse\022P\n\023Get" +
- "TableDescriptors\022\033.GetTableDescriptorsRe",
- "quest\032\034.GetTableDescriptorsResponse\022>\n\rG" +
- "etTableNames\022\025.GetTableNamesRequest\032\026.Ge" +
- "tTableNamesResponse\022G\n\020GetClusterStatus\022" +
- "\030.GetClusterStatusRequest\032\031.GetClusterSt" +
- "atusResponse\022D\n\017IsMasterRunning\022\027.IsMast" +
- "erRunningRequest\032\030.IsMasterRunningRespon" +
- "se\0222\n\tAddColumn\022\021.AddColumnRequest\032\022.Add" +
- "ColumnResponse\022;\n\014DeleteColumn\022\024.DeleteC" +
- "olumnRequest\032\025.DeleteColumnResponse\022;\n\014M" +
- "odifyColumn\022\024.ModifyColumnRequest\032\025.Modi",
- "fyColumnResponse\0225\n\nMoveRegion\022\022.MoveReg" +
- "ionRequest\032\023.MoveRegionResponse\022Y\n\026Dispa" +
- "tchMergingRegions\022\036.DispatchMergingRegio" +
- "nsRequest\032\037.DispatchMergingRegionsRespon" +
- "se\022;\n\014AssignRegion\022\024.AssignRegionRequest" +
- "\032\025.AssignRegionResponse\022A\n\016UnassignRegio" +
- "n\022\026.UnassignRegionRequest\032\027.UnassignRegi" +
- "onResponse\022>\n\rOfflineRegion\022\025.OfflineReg" +
- "ionRequest\032\026.OfflineRegionResponse\0228\n\013De" +
- "leteTable\022\023.DeleteTableRequest\032\024.DeleteT",
- "ableResponse\022>\n\rtruncateTable\022\025.Truncate" +
- "TableRequest\032\026.TruncateTableResponse\0228\n\013" +
- "EnableTable\022\023.EnableTableRequest\032\024.Enabl" +
- "eTableResponse\022;\n\014DisableTable\022\024.Disable" +
- "TableRequest\032\025.DisableTableResponse\0228\n\013M" +
- "odifyTable\022\023.ModifyTableRequest\032\024.Modify" +
- "TableResponse\0228\n\013CreateTable\022\023.CreateTab" +
- "leRequest\032\024.CreateTableResponse\022/\n\010Shutd" +
- "own\022\020.ShutdownRequest\032\021.ShutdownResponse" +
- "\0225\n\nStopMaster\022\022.StopMasterRequest\032\023.Sto",
- "pMasterResponse\022,\n\007Balance\022\017.BalanceRequ" +
- "est\032\020.BalanceResponse\022M\n\022SetBalancerRunn" +
- "ing\022\032.SetBalancerRunningRequest\032\033.SetBal" +
- "ancerRunningResponse\022J\n\021IsBalancerEnable" +
- "d\022\031.IsBalancerEnabledRequest\032\032.IsBalance" +
- "rEnabledResponse\022A\n\016RunCatalogScan\022\026.Run" +
- "CatalogScanRequest\032\027.RunCatalogScanRespo" +
- "nse\022S\n\024EnableCatalogJanitor\022\034.EnableCata" +
- "logJanitorRequest\032\035.EnableCatalogJanitor" +
- "Response\022\\\n\027IsCatalogJanitorEnabled\022\037.Is",
- "CatalogJanitorEnabledRequest\032 .IsCatalog" +
- "JanitorEnabledResponse\022L\n\021ExecMasterServ" +
- "ice\022\032.CoprocessorServiceRequest\032\033.Coproc" +
- "essorServiceResponse\022/\n\010Snapshot\022\020.Snaps" +
- "hotRequest\032\021.SnapshotResponse\022V\n\025GetComp" +
- "letedSnapshots\022\035.GetCompletedSnapshotsRe" +
- "quest\032\036.GetCompletedSnapshotsResponse\022A\n" +
- "\016DeleteSnapshot\022\026.DeleteSnapshotRequest\032" +
- "\027.DeleteSnapshotResponse\022A\n\016IsSnapshotDo" +
- "ne\022\026.IsSnapshotDoneRequest\032\027.IsSnapshotD",
- "oneResponse\022D\n\017RestoreSnapshot\022\027.Restore" +
- "SnapshotRequest\032\030.RestoreSnapshotRespons" +
- "e\022V\n\025IsRestoreSnapshotDone\022\035.IsRestoreSn" +
- "apshotDoneRequest\032\036.IsRestoreSnapshotDon" +
- "eResponse\022>\n\rExecProcedure\022\025.ExecProcedu" +
- "reRequest\032\026.ExecProcedureResponse\022E\n\024Exe" +
- "cProcedureWithRet\022\025.ExecProcedureRequest" +
- "\032\026.ExecProcedureResponse\022D\n\017IsProcedureD" +
- "one\022\027.IsProcedureDoneRequest\032\030.IsProcedu" +
- "reDoneResponse\022D\n\017ModifyNamespace\022\027.Modi",
- "fyNamespaceRequest\032\030.ModifyNamespaceResp" +
- "onse\022D\n\017CreateNamespace\022\027.CreateNamespac" +
- "eRequest\032\030.CreateNamespaceResponse\022D\n\017De" +
- "leteNamespace\022\027.DeleteNamespaceRequest\032\030" +
- ".DeleteNamespaceResponse\022Y\n\026GetNamespace" +
- "Descriptor\022\036.GetNamespaceDescriptorReque" +
- "st\032\037.GetNamespaceDescriptorResponse\022_\n\030L" +
- "istNamespaceDescriptors\022 .ListNamespaceD" +
- "escriptorsRequest\032!.ListNamespaceDescrip" +
- "torsResponse\022t\n\037ListTableDescriptorsByNa",
- "mespace\022\'.ListTableDescriptorsByNamespac" +
- "eRequest\032(.ListTableDescriptorsByNamespa" +
- "ceResponse\022b\n\031ListTableNamesByNamespace\022" +
- "!.ListTableNamesByNamespaceRequest\032\".Lis" +
- "tTableNamesByNamespaceResponse\022>\n\rGetTab" +
- "leState\022\025.GetTableStateRequest\032\026.GetTabl" +
- "eStateResponse\022/\n\010SetQuota\022\020.SetQuotaReq" +
- "uest\032\021.SetQuotaResponse\022f\n\037getLastMajorC" +
- "ompactionTimestamp\022 .MajorCompactionTime" +
- "stampRequest\032!.MajorCompactionTimestampR",
- "esponse\022x\n(getLastMajorCompactionTimesta" +
- "mpForRegion\022).MajorCompactionTimestampFo" +
- "rRegionRequest\032!.MajorCompactionTimestam" +
- "pResponse\022M\n\022getProcedureResult\022\032.GetPro" +
- "cedureResultRequest\032\033.GetProcedureResult" +
- "ResponseBB\n*org.apache.hadoop.hbase.prot" +
- "obuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001"
+ "TableName\"&\n\023EnableTableResponse\022\017\n\007proc" +
+ "_id\030\001 \001(\004\"5\n\023DisableTableRequest\022\036\n\ntabl" +
+ "e_name\030\001 \002(\0132\n.TableName\"\'\n\024DisableTable" +
+ "Response\022\017\n\007proc_id\030\001 \001(\004\"X\n\022ModifyTable" +
+ "Request\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022" +
+ "\"\n\014table_schema\030\002 \002(\0132\014.TableSchema\"\025\n\023M",
+ "odifyTableResponse\"K\n\026CreateNamespaceReq" +
+ "uest\0221\n\023namespaceDescriptor\030\001 \002(\0132\024.Name" +
+ "spaceDescriptor\"\031\n\027CreateNamespaceRespon" +
+ "se\"/\n\026DeleteNamespaceRequest\022\025\n\rnamespac" +
+ "eName\030\001 \002(\t\"\031\n\027DeleteNamespaceResponse\"K" +
+ "\n\026ModifyNamespaceRequest\0221\n\023namespaceDes" +
+ "criptor\030\001 \002(\0132\024.NamespaceDescriptor\"\031\n\027M" +
+ "odifyNamespaceResponse\"6\n\035GetNamespaceDe" +
+ "scriptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"S" +
+ "\n\036GetNamespaceDescriptorResponse\0221\n\023name",
+ "spaceDescriptor\030\001 \002(\0132\024.NamespaceDescrip" +
+ "tor\"!\n\037ListNamespaceDescriptorsRequest\"U" +
+ "\n ListNamespaceDescriptorsResponse\0221\n\023na" +
+ "mespaceDescriptor\030\001 \003(\0132\024.NamespaceDescr" +
+ "iptor\"?\n&ListTableDescriptorsByNamespace" +
+ "Request\022\025\n\rnamespaceName\030\001 \002(\t\"L\n\'ListTa" +
+ "bleDescriptorsByNamespaceResponse\022!\n\013tab" +
+ "leSchema\030\001 \003(\0132\014.TableSchema\"9\n ListTabl" +
+ "eNamesByNamespaceRequest\022\025\n\rnamespaceNam" +
+ "e\030\001 \002(\t\"B\n!ListTableNamesByNamespaceResp",
+ "onse\022\035\n\ttableName\030\001 \003(\0132\n.TableName\"\021\n\017S" +
+ "hutdownRequest\"\022\n\020ShutdownResponse\"\023\n\021St" +
+ "opMasterRequest\"\024\n\022StopMasterResponse\"\020\n" +
+ "\016BalanceRequest\"\'\n\017BalanceResponse\022\024\n\014ba" +
+ "lancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunningRe" +
+ "quest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8" +
+ "\n\032SetBalancerRunningResponse\022\032\n\022prev_bal" +
+ "ance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabledReq" +
+ "uest\",\n\031IsBalancerEnabledResponse\022\017\n\007ena" +
+ "bled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026R",
+ "unCatalogScanResponse\022\023\n\013scan_result\030\001 \001" +
+ "(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006ena" +
+ "ble\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespons" +
+ "e\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanito" +
+ "rEnabledRequest\"0\n\037IsCatalogJanitorEnabl" +
+ "edResponse\022\r\n\005value\030\001 \002(\010\"9\n\017SnapshotReq" +
+ "uest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescrip" +
+ "tion\",\n\020SnapshotResponse\022\030\n\020expected_tim" +
+ "eout\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsReque" +
+ "st\"H\n\035GetCompletedSnapshotsResponse\022\'\n\ts",
+ "napshots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025" +
+ "DeleteSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132" +
+ "\024.SnapshotDescription\"\030\n\026DeleteSnapshotR" +
+ "esponse\"@\n\026RestoreSnapshotRequest\022&\n\010sna" +
+ "pshot\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Res" +
+ "toreSnapshotResponse\"?\n\025IsSnapshotDoneRe" +
+ "quest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescri" +
+ "ption\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030" +
+ "\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.Snapsho" +
+ "tDescription\"F\n\034IsRestoreSnapshotDoneReq",
+ "uest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescrip" +
+ "tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n" +
+ "\004done\030\001 \001(\010:\005false\"=\n\033GetSchemaAlterStat" +
+ "usRequest\022\036\n\ntable_name\030\001 \002(\0132\n.TableNam" +
+ "e\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025yet" +
+ "_to_update_regions\030\001 \001(\r\022\025\n\rtotal_region" +
+ "s\030\002 \001(\r\"\202\001\n\032GetTableDescriptorsRequest\022\037" +
+ "\n\013table_names\030\001 \003(\0132\n.TableName\022\r\n\005regex" +
+ "\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005fals" +
+ "e\022\021\n\tnamespace\030\004 \001(\t\"A\n\033GetTableDescript",
+ "orsResponse\022\"\n\014table_schema\030\001 \003(\0132\014.Tabl" +
+ "eSchema\"[\n\024GetTableNamesRequest\022\r\n\005regex" +
+ "\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005fals" +
+ "e\022\021\n\tnamespace\030\003 \001(\t\"8\n\025GetTableNamesRes" +
+ "ponse\022\037\n\013table_names\030\001 \003(\0132\n.TableName\"6" +
+ "\n\024GetTableStateRequest\022\036\n\ntable_name\030\001 \002" +
+ "(\0132\n.TableName\"9\n\025GetTableStateResponse\022" +
+ " \n\013table_state\030\001 \002(\0132\013.TableState\"\031\n\027Get" +
+ "ClusterStatusRequest\"B\n\030GetClusterStatus" +
+ "Response\022&\n\016cluster_status\030\001 \002(\0132\016.Clust",
+ "erStatus\"\030\n\026IsMasterRunningRequest\"4\n\027Is" +
+ "MasterRunningResponse\022\031\n\021is_master_runni" +
+ "ng\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n\tproc" +
+ "edure\030\001 \002(\0132\025.ProcedureDescription\"F\n\025Ex" +
+ "ecProcedureResponse\022\030\n\020expected_timeout\030" +
+ "\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsProcedur" +
+ "eDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025.Proced" +
+ "ureDescription\"W\n\027IsProcedureDoneRespons" +
+ "e\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\013" +
+ "2\025.ProcedureDescription\",\n\031GetProcedureR",
+ "esultRequest\022\017\n\007proc_id\030\001 \002(\004\"\347\001\n\032GetPro" +
+ "cedureResultResponse\0220\n\005state\030\001 \002(\0162!.Ge" +
+ "tProcedureResultResponse.State\022\022\n\nstart_" +
+ "time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006resul" +
+ "t\030\004 \001(\014\022+\n\texception\030\005 \001(\0132\030.ForeignExce" +
+ "ptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007" +
+ "RUNNING\020\001\022\014\n\010FINISHED\020\002\"\273\001\n\017SetQuotaRequ" +
+ "est\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001" +
+ "(\t\022\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(" +
+ "\0132\n.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp",
+ "ass_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Th" +
+ "rottleRequest\"\022\n\020SetQuotaResponse\"A\n\037Maj" +
+ "orCompactionTimestampRequest\022\036\n\ntable_na" +
+ "me\030\001 \002(\0132\n.TableName\"L\n(MajorCompactionT" +
+ "imestampForRegionRequest\022 \n\006region\030\001 \002(\013" +
+ "2\020.RegionSpecifier\"@\n MajorCompactionTim" +
+ "estampResponse\022\034\n\024compaction_timestamp\030\001" +
+ " \002(\0032\343\033\n\rMasterService\022S\n\024GetSchemaAlter" +
+ "Status\022\034.GetSchemaAlterStatusRequest\032\035.G" +
+ "etSchemaAlterStatusResponse\022P\n\023GetTableD",
+ "escriptors\022\033.GetTableDescriptorsRequest\032" +
+ "\034.GetTableDescriptorsResponse\022>\n\rGetTabl" +
+ "eNames\022\025.GetTableNamesRequest\032\026.GetTable" +
+ "NamesResponse\022G\n\020GetClusterStatus\022\030.GetC" +
+ "lusterStatusRequest\032\031.GetClusterStatusRe" +
+ "sponse\022D\n\017IsMasterRunning\022\027.IsMasterRunn" +
+ "ingRequest\032\030.IsMasterRunningResponse\0222\n\t" +
+ "AddColumn\022\021.AddColumnRequest\032\022.AddColumn" +
+ "Response\022;\n\014DeleteColumn\022\024.DeleteColumnR" +
+ "equest\032\025.DeleteColumnResponse\022;\n\014ModifyC",
+ "olumn\022\024.ModifyColumnRequest\032\025.ModifyColu" +
+ "mnResponse\0225\n\nMoveRegion\022\022.MoveRegionReq" +
+ "uest\032\023.MoveRegionResponse\022Y\n\026DispatchMer" +
+ "gingRegions\022\036.DispatchMergingRegionsRequ" +
+ "est\032\037.DispatchMergingRegionsResponse\022;\n\014" +
+ "AssignRegion\022\024.AssignRegionRequest\032\025.Ass" +
+ "ignRegionResponse\022A\n\016UnassignRegion\022\026.Un" +
+ "assignRegionRequest\032\027.UnassignRegionResp" +
+ "onse\022>\n\rOfflineRegion\022\025.OfflineRegionReq" +
+ "uest\032\026.OfflineRegionResponse\0228\n\013DeleteTa",
+ "ble\022\023.DeleteTableRequest\032\024.DeleteTableRe" +
+ "sponse\022>\n\rtruncateTable\022\025.TruncateTableR" +
+ "equest\032\026.TruncateTableResponse\0228\n\013Enable" +
+ "Table\022\023.EnableTableRequest\032\024.EnableTable" +
+ "Response\022;\n\014DisableTable\022\024.DisableTableR" +
+ "equest\032\025.DisableTableResponse\0228\n\013ModifyT" +
+ "able\022\023.ModifyTableRequest\032\024.ModifyTableR" +
+ "esponse\0228\n\013CreateTable\022\023.CreateTableRequ" +
+ "est\032\024.CreateTableResponse\022/\n\010Shutdown\022\020." +
+ "ShutdownRequest\032\021.ShutdownResponse\0225\n\nSt",
+ "opMaster\022\022.StopMasterRequest\032\023.StopMaste" +
+ "rResponse\022,\n\007Balance\022\017.BalanceRequest\032\020." +
+ "BalanceResponse\022M\n\022SetBalancerRunning\022\032." +
+ "SetBalancerRunningRequest\032\033.SetBalancerR" +
+ "unningResponse\022J\n\021IsBalancerEnabled\022\031.Is" +
+ "BalancerEnabledRequest\032\032.IsBalancerEnabl" +
+ "edResponse\022A\n\016RunCatalogScan\022\026.RunCatalo" +
+ "gScanRequest\032\027.RunCatalogScanResponse\022S\n" +
+ "\024EnableCatalogJanitor\022\034.EnableCatalogJan" +
+ "itorRequest\032\035.EnableCatalogJanitorRespon",
+ "se\022\\\n\027IsCatalogJanitorEnabled\022\037.IsCatalo" +
+ "gJanitorEnabledRequest\032 .IsCatalogJanito" +
+ "rEnabledResponse\022L\n\021ExecMasterService\022\032." +
+ "CoprocessorServiceRequest\032\033.CoprocessorS" +
+ "erviceResponse\022/\n\010Snapshot\022\020.SnapshotReq" +
+ "uest\032\021.SnapshotResponse\022V\n\025GetCompletedS" +
+ "napshots\022\035.GetCompletedSnapshotsRequest\032" +
+ "\036.GetCompletedSnapshotsResponse\022A\n\016Delet" +
+ "eSnapshot\022\026.DeleteSnapshotRequest\032\027.Dele" +
+ "teSnapshotResponse\022A\n\016IsSnapshotDone\022\026.I",
+ "sSnapshotDoneRequest\032\027.IsSnapshotDoneRes" +
+ "ponse\022D\n\017RestoreSnapshot\022\027.RestoreSnapsh" +
+ "otRequest\032\030.RestoreSnapshotResponse\022V\n\025I" +
+ "sRestoreSnapshotDone\022\035.IsRestoreSnapshot" +
+ "DoneRequest\032\036.IsRestoreSnapshotDoneRespo" +
+ "nse\022>\n\rExecProcedure\022\025.ExecProcedureRequ" +
+ "est\032\026.ExecProcedureResponse\022E\n\024ExecProce" +
+ "dureWithRet\022\025.ExecProcedureRequest\032\026.Exe" +
+ "cProcedureResponse\022D\n\017IsProcedureDone\022\027." +
+ "IsProcedureDoneRequest\032\030.IsProcedureDone",
+ "Response\022D\n\017ModifyNamespace\022\027.ModifyName" +
+ "spaceRequest\032\030.ModifyNamespaceResponse\022D" +
+ "\n\017CreateNamespace\022\027.CreateNamespaceReque" +
+ "st\032\030.CreateNamespaceResponse\022D\n\017DeleteNa" +
+ "mespace\022\027.DeleteNamespaceRequest\032\030.Delet" +
+ "eNamespaceResponse\022Y\n\026GetNamespaceDescri" +
+ "ptor\022\036.GetNamespaceDescriptorRequest\032\037.G" +
+ "etNamespaceDescriptorResponse\022_\n\030ListNam" +
+ "espaceDescriptors\022 .ListNamespaceDescrip" +
+ "torsRequest\032!.ListNamespaceDescriptorsRe",
+ "sponse\022t\n\037ListTableDescriptorsByNamespac" +
+ "e\022\'.ListTableDescriptorsByNamespaceReque" +
+ "st\032(.ListTableDescriptorsByNamespaceResp" +
+ "onse\022b\n\031ListTableNamesByNamespace\022!.List" +
+ "TableNamesByNamespaceRequest\032\".ListTable" +
+ "NamesByNamespaceResponse\022>\n\rGetTableStat" +
+ "e\022\025.GetTableStateRequest\032\026.GetTableState" +
+ "Response\022/\n\010SetQuota\022\020.SetQuotaRequest\032\021" +
+ ".SetQuotaResponse\022f\n\037getLastMajorCompact" +
+ "ionTimestamp\022 .MajorCompactionTimestampR",
+ "equest\032!.MajorCompactionTimestampRespons" +
+ "e\022x\n(getLastMajorCompactionTimestampForR" +
+ "egion\022).MajorCompactionTimestampForRegio" +
+ "nRequest\032!.MajorCompactionTimestampRespo" +
+ "nse\022M\n\022getProcedureResult\022\032.GetProcedure" +
+ "ResultRequest\032\033.GetProcedureResultRespon" +
+ "seBB\n*org.apache.hadoop.hbase.protobuf.g" +
+ "eneratedB\014MasterProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -53302,7 +53495,7 @@ public final class MasterProtos {
internal_static_EnableTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EnableTableResponse_descriptor,
- new java.lang.String[] { });
+ new java.lang.String[] { "ProcId", });
internal_static_DisableTableRequest_descriptor =
getDescriptor().getMessageTypes().get(24);
internal_static_DisableTableRequest_fieldAccessorTable = new
@@ -53314,7 +53507,7 @@ public final class MasterProtos {
internal_static_DisableTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DisableTableResponse_descriptor,
- new java.lang.String[] { });
+ new java.lang.String[] { "ProcId", });
internal_static_ModifyTableRequest_descriptor =
getDescriptor().getMessageTypes().get(26);
internal_static_ModifyTableRequest_fieldAccessorTable = new
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index c30d92a..d5f4275 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -133,6 +133,7 @@ message EnableTableRequest {
}
message EnableTableResponse {
+ optional uint64 proc_id = 1;
}
message DisableTableRequest {
@@ -140,6 +141,7 @@ message DisableTableRequest {
}
message DisableTableResponse {
+ optional uint64 proc_id = 1;
}
message ModifyTableRequest {
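Because proc_id is optional in both responses, a client that polls the procedure
explicitly has to guard the field before building the GetProcedureResultRequest declared
in MasterService. A minimal sketch, assuming only the generated MasterProtos types shown
in the diff above:

import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;

public final class ProcIdSketch {
  // Maps an enable response to the request used to poll its procedure result.
  static GetProcedureResultRequest toResultRequest(EnableTableResponse response) {
    if (!response.hasProcId()) {
      // An older master may omit proc_id; there is nothing to poll in that case.
      throw new IllegalStateException("master did not return a procedure id");
    }
    return GetProcedureResultRequest.newBuilder()
        .setProcId(response.getProcId())
        .build();
  }
}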
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ff28081..fdbc31c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1675,7 +1675,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
- public void enableTable(final TableName tableName) throws IOException {
+ public long enableTable(final TableName tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preEnableTable(tableName);
@@ -1697,12 +1697,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.postEnableTable(tableName);
}
- // TODO: return procId as part of client-side change
- // return procId;
+ return procId;
}
@Override
- public void disableTable(final TableName tableName) throws IOException {
+ public long disableTable(final TableName tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preDisableTable(tableName);
@@ -1725,8 +1724,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.postDisableTable(tableName);
}
- // TODO: return procId as part of client-side change
- // return procId;
+ return procId;
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index abdbf5a..91c406c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -489,11 +489,11 @@ public class MasterRpcServices extends RSRpcServices
public DisableTableResponse disableTable(RpcController controller,
DisableTableRequest request) throws ServiceException {
try {
- master.disableTable(ProtobufUtil.toTableName(request.getTableName()));
+ long procId = master.disableTable(ProtobufUtil.toTableName(request.getTableName()));
+ return DisableTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
- return DisableTableResponse.newBuilder().build();
}
@Override
@@ -575,11 +575,11 @@ public class MasterRpcServices extends RSRpcServices
public EnableTableResponse enableTable(RpcController controller,
EnableTableRequest request) throws ServiceException {
try {
- master.enableTable(ProtobufUtil.toTableName(request.getTableName()));
+ long procId = master.enableTable(ProtobufUtil.toTableName(request.getTableName()));
+ return EnableTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
- return EnableTableResponse.newBuilder().build();
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index d823b35..6153139 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -137,14 +137,14 @@ public interface MasterServices extends Server {
* @param tableName The table name
* @throws IOException
*/
- void enableTable(final TableName tableName) throws IOException;
+ long enableTable(final TableName tableName) throws IOException;
/**
* Disable an existing table
* @param tableName The table name
* @throws IOException
*/
- void disableTable(final TableName tableName) throws IOException;
+ long disableTable(final TableName tableName) throws IOException;
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6512065/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 5b2e50d..00cad06 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -441,10 +441,14 @@ public class TestCatalogJanitor {
throws IOException { }
@Override
- public void enableTable(TableName tableName) throws IOException { }
+ public long enableTable(TableName tableName) throws IOException {
+ return -1;
+ }
@Override
- public void disableTable(TableName tableName) throws IOException { }
+ public long disableTable(TableName tableName) throws IOException {
+ return -1;
+ }
@Override
public void addColumn(TableName tableName, HColumnDescriptor column)
[44/50] [abbrv] hbase git commit: Merge branch 'apache/master'
(4/16/15) into hbase-11339
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
index b7af75e,0000000..d891c20
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
@@@ -1,349 -1,0 +1,347 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.*;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Assert;
+
+public class MobSnapshotTestingUtils {
+
+ /**
+ * Create the Mob Table.
+ */
+ public static void createMobTable(final HBaseTestingUtility util,
+ final TableName tableName, int regionReplication,
+ final byte[]... families) throws IOException, InterruptedException {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.setRegionReplication(regionReplication);
+ for (byte[] family : families) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ }
+ byte[][] splitKeys = SnapshotTestingUtils.getSplitKeys();
+ util.getHBaseAdmin().createTable(htd, splitKeys);
+ SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
+ assertEquals((splitKeys.length + 1) * regionReplication, util
+ .getHBaseAdmin().getTableRegions(tableName).size());
+ }
+
+ /**
+ * Create a Mob table.
+ *
+ * @param util
+ * @param tableName
+ * @param families
+ * @return A Table instance for the created table.
+ * @throws IOException
+ */
- public static HTable createMobTable(final HBaseTestingUtility util,
++ public static Table createMobTable(final HBaseTestingUtility util,
+ final TableName tableName, final byte[]... families) throws IOException {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ for (byte[] family : families) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ // Disable blooms (they are on by default as of 0.95); tests have hard-coded
+ // counts of what to expect in the block cache, etc., and blooms being on
+ // interferes with those counts.
+ hcd.setBloomFilterType(BloomType.NONE);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ }
+ util.getHBaseAdmin().createTable(htd);
+ // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
+ // until they are assigned.
+ util.waitUntilAllRegionsAssigned(htd.getTableName());
- return new HTable(util.getConfiguration(), htd.getTableName());
++ return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
+ }
+
+ /**
+ * Return the number of rows in the given table.
+ */
- public static int countMobRows(final HTable table) throws IOException {
++ public static int countMobRows(final Table table) throws IOException {
+ Scan scan = new Scan();
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ count++;
+ List<Cell> cells = res.listCells();
+ for (Cell cell : cells) {
+ // Verify the value
+ Assert.assertTrue(CellUtil.cloneValue(cell).length > 0);
+ }
+ }
+ results.close();
+ return count;
+ }
+
+ /**
+ * Return the number of rows in the given table.
+ */
- public static int countMobRows(final HTable table, final byte[]... families)
++ public static int countMobRows(final Table table, final byte[]... families)
+ throws IOException {
+ Scan scan = new Scan();
+ for (byte[] family : families) {
+ scan.addFamily(family);
+ }
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ count++;
+ List<Cell> cells = res.listCells();
+ for (Cell cell : cells) {
+ // Verify the value
+ Assert.assertTrue(CellUtil.cloneValue(cell).length > 0);
+ }
+ }
+ results.close();
+ return count;
+ }
+
+ public static void verifyMobRowCount(final HBaseTestingUtility util,
+ final TableName tableName, long expectedRows) throws IOException {
- HTable table = new HTable(util.getConfiguration(), tableName);
++
++ Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
+ try {
+ assertEquals(expectedRows, countMobRows(table));
+ } finally {
+ table.close();
+ }
+ }
+
+ // ==========================================================================
+ // Snapshot Mock
+ // ==========================================================================
+ public static class SnapshotMock {
+ private final static String TEST_FAMILY = "cf";
+ public final static int TEST_NUM_REGIONS = 4;
+
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final Path rootDir;
+
+ static class RegionData {
+ public HRegionInfo hri;
+ public Path tableDir;
+ public Path[] files;
+
+ public RegionData(final Path tableDir, final HRegionInfo hri,
+ final int nfiles) {
+ this.tableDir = tableDir;
+ this.hri = hri;
+ this.files = new Path[nfiles];
+ }
+ }
+
+ public static class SnapshotBuilder {
+ private final RegionData[] tableRegions;
+ private final SnapshotDescription desc;
+ private final HTableDescriptor htd;
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final Path rootDir;
+ private Path snapshotDir;
+ private int snapshotted = 0;
+
+ public SnapshotBuilder(final Configuration conf, final FileSystem fs,
+ final Path rootDir, final HTableDescriptor htd,
+ final SnapshotDescription desc, final RegionData[] tableRegions)
+ throws IOException {
+ this.fs = fs;
+ this.conf = conf;
+ this.rootDir = rootDir;
+ this.htd = htd;
+ this.desc = desc;
+ this.tableRegions = tableRegions;
+ this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc,
+ rootDir);
+ new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
+ snapshotDir, new TableDescriptor(htd), false);
+ }
+
+ public HTableDescriptor getTableDescriptor() {
+ return this.htd;
+ }
+
+ public SnapshotDescription getSnapshotDescription() {
+ return this.desc;
+ }
+
+ public Path getSnapshotsDir() {
+ return this.snapshotDir;
+ }
+
+ public Path[] addRegion() throws IOException {
+ return addRegion(desc);
+ }
+
+ public Path[] addRegionV1() throws IOException {
+ return addRegion(desc.toBuilder()
+ .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION).build());
+ }
+
+ public Path[] addRegionV2() throws IOException {
+ return addRegion(desc.toBuilder()
+ .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION).build());
+ }
+
+ private Path[] addRegion(final SnapshotDescription desc)
+ throws IOException {
+ if (this.snapshotted == tableRegions.length) {
+ throw new UnsupportedOperationException(
+ "No more regions in the table");
+ }
+
+ RegionData regionData = tableRegions[this.snapshotted++];
+ ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(
+ desc.getName());
+ SnapshotManifest manifest = SnapshotManifest.create(conf, fs,
+ snapshotDir, desc, monitor);
+ manifest.addRegion(regionData.tableDir, regionData.hri);
+ return regionData.files;
+ }
+
+ public Path commit() throws IOException {
+ ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(
+ desc.getName());
+ SnapshotManifest manifest = SnapshotManifest.create(conf, fs,
+ snapshotDir, desc, monitor);
+ manifest.addTableDescriptor(htd);
+ manifest.consolidate();
+ SnapshotDescriptionUtils.completeSnapshot(desc, rootDir, snapshotDir,
+ fs);
+ snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc,
+ rootDir);
+ return snapshotDir;
+ }
+ }
+
+ public SnapshotMock(final Configuration conf, final FileSystem fs,
+ final Path rootDir) {
+ this.fs = fs;
+ this.conf = conf;
+ this.rootDir = rootDir;
+ }
+
+ public SnapshotBuilder createSnapshotV1(final String snapshotName)
+ throws IOException {
+ return createSnapshot(snapshotName, SnapshotManifestV1.DESCRIPTOR_VERSION);
+ }
+
+ public SnapshotBuilder createSnapshotV2(final String snapshotName)
+ throws IOException {
+ return createSnapshot(snapshotName, SnapshotManifestV2.DESCRIPTOR_VERSION);
+ }
+
+ private SnapshotBuilder createSnapshot(final String snapshotName,
+ final int version) throws IOException {
+ HTableDescriptor htd = createHtd(snapshotName);
+
+ RegionData[] regions = createTable(htd, TEST_NUM_REGIONS);
+
+ SnapshotDescription desc = SnapshotDescription.newBuilder()
+ .setTable(htd.getNameAsString()).setName(snapshotName)
+ .setVersion(version).build();
+
+ Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc,
+ rootDir);
+ SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
+ return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
+ }
+
+ public HTableDescriptor createHtd(final String tableName) {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ return htd;
+ }
+
+ private RegionData[] createTable(final HTableDescriptor htd,
+ final int nregions) throws IOException {
+ Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
+ new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
+ tableDir, new TableDescriptor(htd), false);
+
+ assertTrue(nregions % 2 == 0);
+ RegionData[] regions = new RegionData[nregions];
+ for (int i = 0; i < regions.length; i += 2) {
+ byte[] startKey = Bytes.toBytes(0 + i * 2);
+ byte[] endKey = Bytes.toBytes(1 + i * 2);
+
+ // First region, simple with one plain hfile.
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
+ HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(
+ conf, fs, tableDir, hri);
+ regions[i] = new RegionData(tableDir, hri, 3);
+ for (int j = 0; j < regions[i].files.length; ++j) {
+ Path storeFile = createStoreFile(rfs.createTempName());
+ regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
+ }
+
+ // Second region, used to test the split case.
+ // This region contains a reference to the hfile in the first region.
+ startKey = Bytes.toBytes(2 + i * 2);
+ endKey = Bytes.toBytes(3 + i * 2);
+ hri = new HRegionInfo(htd.getTableName());
+ rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir,
+ hri);
+ regions[i + 1] = new RegionData(tableDir, hri, regions[i].files.length);
+ for (int j = 0; j < regions[i].files.length; ++j) {
+ String refName = regions[i].files[j].getName() + '.'
+ + regions[i].hri.getEncodedName();
+ Path refFile = createStoreFile(new Path(rootDir, refName));
+ regions[i + 1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
+ }
+ }
+ return regions;
+ }
+
+ private Path createStoreFile(final Path storeFile) throws IOException {
+ FSDataOutputStream out = fs.create(storeFile);
+ try {
+ out.write(Bytes.toBytes(storeFile.toString()));
+ } finally {
+ out.close();
+ }
+ return storeFile;
+ }
+ }
+}
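As a usage note, here is a minimal sketch of driving the helpers above from a
test, assuming a started HBaseTestingUtility ('util') and an illustrative table
name; unlike verifyMobRowCount() above, this variant also closes the Connection
it creates:

    // Sketch only; the table name and family are illustrative.
    TableName tn = TableName.valueOf("mobUsageExample");
    MobSnapshotTestingUtils.createMobTable(util, tn, 1, Bytes.toBytes("cf"));
    try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
        Table table = conn.getTable(tn)) {
      // after loading some data through 'table'...
      int rows = MobSnapshotTestingUtils.countMobRows(table);
      MobSnapshotTestingUtils.verifyMobRowCount(util, tn, rows);
    }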
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
index 5517f4a,0000000..f7a9918
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
@@@ -1,551 -1,0 +1,548 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
- import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.ScannerCallable;
- import org.apache.hadoop.hbase.ipc.RpcClient;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test creating/using/deleting snapshots from the client
+ * <p>
+ * This is an end-to-end test for the snapshot utility
+ *
+ * TODO This is essentially a clone of TestSnapshotFromClient. This is worth refactoring
+ * because there will be a few more flavors of snapshots that need to run these tests.
+ */
+@Category({ClientTests.class, LargeTests.class})
+public class TestMobFlushSnapshotFromClient {
+ private static final Log LOG = LogFactory.getLog(TestMobFlushSnapshotFromClient.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final int NUM_RS = 2;
+ private static final String STRING_TABLE_NAME = "test";
+ private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+ private static final TableName TABLE_NAME =
+ TableName.valueOf(STRING_TABLE_NAME);
+ private final int DEFAULT_NUM_ROWS = 100;
+
+ /**
+ * Setup the config for the cluster
+ * @throws Exception on failure
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(NUM_RS);
+ }
+
+ private static void setupConf(Configuration conf) {
+ // disable the ui
+ conf.setInt("hbase.regionsever.info.port", -1);
+ // change the flush size to a small amount, regulating number of store files
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // so make sure we get a compaction when doing a load, but keep around some
+ // files in the store
+ conf.setInt("hbase.hstore.compaction.min", 10);
+ conf.setInt("hbase.hstore.compactionThreshold", 10);
+ // block writes if we get to 12 store files
+ conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+ // Enable snapshot
+ conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+ ConstantSizeRegionSplitPolicy.class.getName());
+ conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ UTIL.deleteTable(TABLE_NAME);
+
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ /**
+ * Test simple flush snapshotting a table that is online
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testFlushTableSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the enabled table
+ String snapshotString = "offlineTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(snapshotString);
+ admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure it's a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+ }
+
+ /**
+ * Test snapshotting a table that is online without flushing
+ * @throws Exception
+ */
+ @Test(timeout=300000)
+ public void testSkipFlushTableSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ UTIL.loadTable(table, TEST_FAM);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the enabled table
+ String snapshotString = "skipFlushTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(snapshotString);
+ admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure it's a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+
+ admin.deleteSnapshot(snapshot);
+ snapshots = admin.listSnapshots();
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ }
+
+
+ /**
+ * Test simple flush snapshotting an online table, via the online snapshot procedure
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testFlushTableSnapshotWithProcedure() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the enabled table
+ String snapshotString = "offlineTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(snapshotString);
+ Map<String, String> props = new HashMap<String, String>();
+ props.put("table", TABLE_NAME.getNameAsString());
+ admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION,
+ snapshotString, props);
+
+
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure it's a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+ }
+
+ @Test (timeout=300000)
+ public void testSnapshotFailsOnNonExistantTable() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ TableName tableName = TableName.valueOf("_not_a_table");
+
+ // make sure the table doesn't exist
+ boolean fail = false;
+ do {
+ try {
+ admin.getTableDescriptor(tableName);
+ fail = true;
+ LOG.error("Table:" + tableName + " already exists, checking a new name");
+ tableName = TableName.valueOf(tableName+"!");
+ } catch (TableNotFoundException e) {
+ fail = false;
+ }
+ } while (fail);
+
+ // snapshot the non-existent table
+ try {
+ admin.snapshot("fail", tableName, SnapshotDescription.Type.FLUSH);
+ fail("Snapshot succeeded even though there is not table.");
+ } catch (SnapshotCreationException e) {
+ LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
+ }
+ }
+
+ @Test(timeout = 300000)
+ public void testAsyncFlushSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
+ .setTable(TABLE_NAME.getNameAsString())
+ .setType(SnapshotDescription.Type.FLUSH)
+ .build();
+
+ // take the snapshot async
+ admin.takeSnapshotAsync(snapshot);
+
+ // constantly loop, looking for the snapshot to complete
+ HMaster master = UTIL.getMiniHBaseCluster().getMaster();
+ SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
+ LOG.info(" === Async Snapshot Completed ===");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ // make sure we get the snapshot
+ SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
+ }
+
+ @Test (timeout=300000)
+ public void testSnapshotStateAfterMerge() throws Exception {
+ int numRows = DEFAULT_NUM_ROWS;
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
+
+ // Take a snapshot
+ String snapshotBeforeMergeName = "snapshotBeforeMerge";
+ admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
+
+ // Clone the table
+ TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
+ admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
+
+ // Merge two regions
+ List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
+ Collections.sort(regions, new Comparator<HRegionInfo>() {
+ public int compare(HRegionInfo r1, HRegionInfo r2) {
+ return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
+ }
+ });
+
+ int numRegions = admin.getTableRegions(TABLE_NAME).size();
+ int numRegionsAfterMerge = numRegions - 2;
+ admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
+ regions.get(2).getEncodedNameAsBytes(), true);
+ admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
+ regions.get(6).getEncodedNameAsBytes(), true);
+
+ // Verify that there are two fewer regions (we merged twice)
+ waitRegionsAfterMerge(numRegionsAfterMerge);
+ assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
+
+ // Clone the table
+ TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
+ admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
+
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneBeforeMergeName, numRows);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneAfterMergeName, numRows);
+
+ // test that we can delete the snapshot
+ UTIL.deleteTable(cloneAfterMergeName);
+ UTIL.deleteTable(cloneBeforeMergeName);
+ }
+
+ @Test (timeout=300000)
+ public void testTakeSnapshotAfterMerge() throws Exception {
+ int numRows = DEFAULT_NUM_ROWS;
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
+
+ // Merge two regions
+ List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
+ Collections.sort(regions, new Comparator<HRegionInfo>() {
+ public int compare(HRegionInfo r1, HRegionInfo r2) {
+ return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
+ }
+ });
+
+ int numRegions = admin.getTableRegions(TABLE_NAME).size();
+ int numRegionsAfterMerge = numRegions - 2;
+ admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
+ regions.get(2).getEncodedNameAsBytes(), true);
+ admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
+ regions.get(6).getEncodedNameAsBytes(), true);
+
+ waitRegionsAfterMerge(numRegionsAfterMerge);
+ assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
+
+ // Take a snapshot
+ String snapshotName = "snapshotAfterMerge";
+ SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
+ SnapshotDescription.Type.FLUSH, 3);
+
+ // Clone the table
+ TableName cloneName = TableName.valueOf("cloneMerge");
+ admin.cloneSnapshot(snapshotName, cloneName);
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
+
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneName, numRows);
+
+ // test that we can delete the snapshot
+ UTIL.deleteTable(cloneName);
+ }
+
+ /**
+ * Basic end-to-end test of simple-flush-based snapshots
+ */
+ @Test (timeout=300000)
+ public void testFlushCreateListDestroy() throws Exception {
+ LOG.debug("------- Starting Snapshot test -------------");
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ String snapshotName = "flushSnapshotCreateListDestroy";
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, Bytes.toString(TEST_FAM),
+ snapshotName, rootDir, fs, true);
+ }
+
+ /**
+ * Demonstrate that we reject snapshot requests if a snapshot is already running on the
+ * same table, and that concurrent snapshots on different tables can both
- * succeed concurretly.
++ * succeed concurrently.
+ */
+ @Test(timeout=300000)
+ public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
+ final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2";
+ final TableName TABLE2_NAME =
+ TableName.valueOf(STRING_TABLE2_NAME);
+
+ int ssNum = 20;
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // create second testing table
+ SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+ SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
+ // We'll have one of these per thread
+ class SSRunnable implements Runnable {
+ SnapshotDescription ss;
+ SSRunnable(SnapshotDescription ss) {
+ this.ss = ss;
+ }
+
+ @Override
+ public void run() {
+ try {
+ Admin admin = UTIL.getHBaseAdmin();
+ LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
+ admin.takeSnapshotAsync(ss);
+ } catch (Exception e) {
+ LOG.info("Exception during snapshot request: " + ClientSnapshotDescriptionUtils.toString(
+ ss)
+ + ". This is ok, we expect some", e);
+ }
+ LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
+ toBeSubmitted.countDown();
+ }
+ };
+
+ // build descriptions
+ SnapshotDescription[] descs = new SnapshotDescription[ssNum];
+ for (int i = 0; i < ssNum; i++) {
+ SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+ builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString());
+ builder.setName("ss"+i);
+ builder.setType(SnapshotDescription.Type.FLUSH);
+ descs[i] = builder.build();
+ }
+
+ // kick each off its own thread
+ for (int i=0 ; i < ssNum; i++) {
+ new Thread(new SSRunnable(descs[i])).start();
+ }
+
+ // wait until all have been submitted
+ toBeSubmitted.await();
+
+ // loop until all are done.
+ while (true) {
+ int doneCount = 0;
+ for (SnapshotDescription ss : descs) {
+ try {
+ if (admin.isSnapshotFinished(ss)) {
+ doneCount++;
+ }
+ } catch (Exception e) {
+ LOG.warn("Got an exception when checking for snapshot " + ss.getName(), e);
+ doneCount++;
+ }
+ }
+ if (doneCount == descs.length) {
+ break;
+ }
+ Thread.sleep(100);
+ }
+
+ // dump for debugging
+ logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
+
+ List<SnapshotDescription> taken = admin.listSnapshots();
+ int takenSize = taken.size();
+ LOG.info("Taken " + takenSize + " snapshots: " + taken);
+ assertTrue("We expect at least 1 request to be rejected because of we concurrently" +
+ " issued many requests", takenSize < ssNum && takenSize > 0);
+
+ // Verify that there's at least one snapshot per table
+ int t1SnapshotsCount = 0;
+ int t2SnapshotsCount = 0;
+ for (SnapshotDescription ss : taken) {
+ if (TableName.valueOf(ss.getTable()).equals(TABLE_NAME)) {
+ t1SnapshotsCount++;
+ } else if (TableName.valueOf(ss.getTable()).equals(TABLE2_NAME)) {
+ t2SnapshotsCount++;
+ }
+ }
+ assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 0);
+ assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 0);
+
+ UTIL.deleteTable(TABLE2_NAME);
+ }
+
+ private void logFSTree(Path root) throws IOException {
+ FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
+ }
+
+ private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
+ throws IOException, InterruptedException {
+ Admin admin = UTIL.getHBaseAdmin();
+ // Wait until the merges complete and the region count drops
+ long startTime = System.currentTimeMillis();
+ while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) {
+ // This may be flaky: if the merge is not complete after 15 seconds, give up;
+ // the subsequent assertEquals(numRegionsAfterMerge) will then fail.
+ if ((System.currentTimeMillis() - startTime) > 15000) {
+ break;
+ }
+ Thread.sleep(100);
+ }
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
+ }
+}
+
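A side note on the recurring change in this file: the deprecated
new HTable(conf, name) construction is replaced with the HBase 1.0
Connection/Table split. The inline
ConnectionFactory.createConnection(...).getTable(...) form used above never
closes its Connection; a hedged sketch of the longer-lived pattern instead:

    // Sketch only. A Connection is heavyweight and meant to be shared; Table
    // instances obtained from it are lightweight and closed per use.
    Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration());
    try (Table table = conn.getTable(TABLE_NAME)) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(TEST_FAM, Bytes.toBytes("q"), Bytes.toBytes("v")));
    } finally {
      conn.close(); // typically from an @AfterClass hook
    }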
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
index d281763,0000000..cb58b17
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
@@@ -1,210 -1,0 +1,211 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
++import org.apache.hadoop.hbase.client.ConnectionFactory;
++import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test clone/restore snapshots from the client
+ *
+ * TODO This is essentially a clone of TestRestoreSnapshotFromClient. This is worth refactoring
+ * because there will be a few more flavors of snapshots that need to run these tests.
+ */
+@Category({ClientTests.class,LargeTests.class})
+public class TestMobRestoreFlushSnapshotFromClient {
+ final Log LOG = LogFactory.getLog(getClass());
+
+ private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private final byte[] FAMILY = Bytes.toBytes("cf");
+
+ private byte[] snapshotName0;
+ private byte[] snapshotName1;
+ private byte[] snapshotName2;
+ private int snapshot0Rows;
+ private int snapshot1Rows;
+ private TableName tableName;
+ private Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+ UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+ UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+ UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+ UTIL.getConfiguration().setBoolean(
+ "hbase.master.enabletable.roundrobin", true);
+
+ // Enable snapshot
+ UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ UTIL.getConfiguration().setLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
+ RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT * 2);
+
+ UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+
+ UTIL.startMiniCluster(3);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * Initialize the tests with a table filled with some data
+ * and two snapshots (snapshotName0, snapshotName1) of different states.
+ * The tableName, the snapshot names, and the number of rows in each snapshot are initialized.
+ */
+ @Before
+ public void setup() throws Exception {
+ this.admin = UTIL.getHBaseAdmin();
+
+ long tid = System.currentTimeMillis();
+ tableName = TableName.valueOf("testtb-" + tid);
+ snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+ snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+ snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
+
+ // create Table
+ MobSnapshotTestingUtils.createMobTable(UTIL, tableName, 1, FAMILY);
+
- HTable table = new HTable(UTIL.getConfiguration(), tableName);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(tableName);
+ SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
+ snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table);
+ LOG.info("=== before snapshot with 500 rows");
+ logFSTree();
+
+ // take a snapshot
+ admin.snapshot(Bytes.toString(snapshotName0), tableName,
+ SnapshotDescription.Type.FLUSH);
+
+ LOG.info("=== after snapshot with 500 rows");
+ logFSTree();
+
+ // insert more data
+ SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
+ snapshot1Rows = MobSnapshotTestingUtils.countMobRows(table);
+ LOG.info("=== before snapshot with 1000 rows");
+ logFSTree();
+
+ // take a snapshot of the updated table
+ admin.snapshot(Bytes.toString(snapshotName1), tableName,
+ SnapshotDescription.Type.FLUSH);
+ LOG.info("=== after snapshot with 1000 rows");
+ logFSTree();
+ table.close();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @Test
+ public void testTakeFlushSnapshot() throws IOException {
+ // taking happens in setup.
+ }
+
+ @Test
+ public void testRestoreSnapshot() throws IOException {
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot1Rows);
+
+ // Restore from snapshot-0
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName0);
+ logFSTree();
+ admin.enableTable(tableName);
+ LOG.info("=== after restore with 500 row snapshot");
+ logFSTree();
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot0Rows);
+
+ // Restore from snapshot-1
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName1);
+ admin.enableTable(tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot1Rows);
+ }
+
+ @Test(expected=SnapshotDoesNotExistException.class)
+ public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
+ String snapshotName = "random-snapshot-" + System.currentTimeMillis();
+ TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName, tableName);
+ }
+
+ @Test
+ public void testCloneSnapshot() throws IOException, InterruptedException {
+ TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+ testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
+ testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
+ }
+
+ private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName,
+ int snapshotRows) throws IOException, InterruptedException {
+ // create a new table from snapshot
+ admin.cloneSnapshot(snapshotName, tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshotRows);
+
+ UTIL.deleteTable(tableName);
+ }
+
+ @Test
+ public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
+ TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName0, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, clonedTableName, snapshot0Rows);
+ admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH);
+ UTIL.deleteTable(clonedTableName);
+
+ admin.cloneSnapshot(snapshotName2, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, clonedTableName, snapshot0Rows);
+ UTIL.deleteTable(clonedTableName);
+ }
+
+ // ==========================================================================
+ // Helpers
+ // ==========================================================================
+ private void logFSTree() throws IOException {
+ MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+ FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
index 1893c7a,0000000..70b4312
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
@@@ -1,163 -1,0 +1,159 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
- import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils.SnapshotMock;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test the restore/clone operation from a file-system point of view.
+ */
+@Category(SmallTests.class)
+public class TestMobRestoreSnapshotHelper {
+ final Log LOG = LogFactory.getLog(getClass());
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private final static String TEST_HFILE = "abc";
+
+ private Configuration conf;
- private Path archiveDir;
+ private FileSystem fs;
+ private Path rootDir;
+
+ @Before
+ public void setup() throws Exception {
+ rootDir = TEST_UTIL.getDataTestDir("testRestore");
- archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
+ fs = TEST_UTIL.getTestFileSystem();
+ TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ conf = TEST_UTIL.getConfiguration();
+ FSUtils.setRootDir(conf, rootDir);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ fs.delete(TEST_UTIL.getDataTestDir(), true);
+ }
+
+ @Test
+ public void testRestore() throws IOException {
+ // Test a rolling-upgrade-like snapshot:
+ // half the machines write the v1 format and the others write v2.
+ SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+ SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot");
+ builder.addRegionV1();
+ builder.addRegionV2();
+ builder.addRegionV2();
+ builder.addRegionV1();
+ Path snapshotDir = builder.commit();
+ HTableDescriptor htd = builder.getTableDescriptor();
+ SnapshotDescription desc = builder.getSnapshotDescription();
+
+ // Test clone a snapshot
+ HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
+ testRestore(snapshotDir, desc, htdClone);
+ verifyRestore(rootDir, htd, htdClone);
+
+ // Test clone a clone ("link to link")
+ SnapshotDescription cloneDesc = SnapshotDescription.newBuilder()
+ .setName("cloneSnapshot")
+ .setTable("testtb-clone")
+ .build();
+ Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
+ HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
+ testRestore(cloneDir, cloneDesc, htdClone2);
+ verifyRestore(rootDir, htd, htdClone2);
+ }
+
+ private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
+ final HTableDescriptor htdClone) throws IOException {
+ String[] files = SnapshotTestingUtils.listHFileNames(fs,
+ FSUtils.getTableDir(rootDir, htdClone.getTableName()));
+ assertEquals(12, files.length);
+ for (int i = 0; i < files.length; i += 2) {
+ String linkFile = files[i];
+ String refFile = files[i+1];
+ assertTrue(linkFile + " should be an HFileLink", HFileLink.isHFileLink(linkFile));
+ assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
+ assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
+ Path refPath = getReferredToFile(refFile);
+ LOG.debug("got reference name for file " + refFile + " = " + refPath);
+ assertTrue(refPath.getName() + " should be an HFileLink", HFileLink.isHFileLink(refPath.getName()));
+ assertEquals(linkFile, refPath.getName());
+ }
+ }
+
+ /**
+ * Execute the restore operation
+ * @param snapshotDir The snapshot directory to use as "restore source"
+ * @param sd The snapshot descriptor
+ * @param htdClone The HTableDescriptor of the table to restore/clone.
+ */
+ public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
+ final HTableDescriptor htdClone) throws IOException {
+ LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ new FSTableDescriptors(conf).createTableDescriptor(htdClone);
+ RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
+ helper.restoreHdfsRegions();
+
+ LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+ }
+
+ /**
+ * Initialize the restore helper, based on the snapshot and table information provided.
+ */
+ private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
+ final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
+ ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
+ MonitoredTask status = Mockito.mock(MonitoredTask.class);
+
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
+ return new RestoreSnapshotHelper(conf, fs, manifest,
+ htdClone, rootDir, monitor, status);
+ }
+
+ private Path getReferredToFile(final String referenceName) {
+ Path fakeBasePath = new Path(new Path("table", "region"), "cf");
+ return StoreFileInfo.getReferredToFile(new Path(fakeBasePath, referenceName));
+ }
+}
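For readers of verifyRestore() above: the reference-file naming convention the
snapshot mock relies on is '<hfileName>.<encodedParentRegionName>' (see how
createTable() in MobSnapshotTestingUtils builds refName), and
getReferredToFile() strips that suffix to recover the linked-to file. A small
illustration with made-up names:

    // Illustration only; both names are hypothetical.
    String hfileName = "abc123";                        // plain hfile name
    String encodedRegion = "1588230740";                // encoded parent region name
    String refName = hfileName + '.' + encodedRegion;   // "abc123.1588230740"
    boolean isRef = StoreFileInfo.isReference(refName); // expected: true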
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
index 6ce4252,0000000..006316a
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
@@@ -1,73 -1,0 +1,73 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator;
+
+/**
+ * A load test data generator for MOB
+ */
+public class LoadTestDataGeneratorWithMOB
+ extends MultiThreadedAction.DefaultDataGenerator {
+
+ private byte[] mobColumnFamily;
+ private LoadTestKVGenerator mobKvGenerator;
+
+ public LoadTestDataGeneratorWithMOB(int minValueSize, int maxValueSize,
+ int minColumnsPerKey, int maxColumnsPerKey, byte[]... columnFamilies) {
+ super(minValueSize, maxValueSize, minColumnsPerKey, maxColumnsPerKey,
+ columnFamilies);
+ }
+
+ public LoadTestDataGeneratorWithMOB(byte[]... columnFamilies) {
+ super(columnFamilies);
+ }
+
+ @Override
+ public void initialize(String[] args) {
+ super.initialize(args);
+ if (args.length != 3) {
+ throw new IllegalArgumentException(
+ "LoadTestDataGeneratorWithMOB can have 3 arguments."
- + "1st arguement is a column family, the 2nd argument "
++ + "1st argument is a column family, the 2nd argument "
+ + "is the minimum mob data size and the 3rd argument "
+ + "is the maximum mob data size.");
+ }
+ String mobColumnFamily = args[0];
+ int minMobDataSize = Integer.parseInt(args[1]);
+ int maxMobDataSize = Integer.parseInt(args[2]);
+ configureMob(Bytes.toBytes(mobColumnFamily), minMobDataSize, maxMobDataSize);
+ }
+
+ private void configureMob(byte[] mobColumnFamily, int minMobDataSize,
+ int maxMobDataSize) {
+ this.mobColumnFamily = mobColumnFamily;
+ mobKvGenerator = new LoadTestKVGenerator(minMobDataSize, maxMobDataSize);
+ }
+
+ @Override
+ public byte[] generateValue(byte[] rowKey, byte[] cf,
+ byte[] column) {
+ if (Arrays.equals(cf, mobColumnFamily)) {
+ return mobKvGenerator.generateRandomSizeValue(rowKey, cf, column);
+ }
+ return super.generateValue(rowKey, cf, column);
+ }
+}
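A brief sketch of exercising the generator above directly (outside of
LoadTestTool), assuming a 'mob' column family and mob values between 1 KB and
10 KB; the argument order follows initialize() above:

    // Sketch only; the family, sizes and keys are illustrative.
    byte[] mobFam = Bytes.toBytes("mob");
    LoadTestDataGeneratorWithMOB gen = new LoadTestDataGeneratorWithMOB(mobFam);
    gen.initialize(new String[] { "mob", "1024", "10240" });
    byte[] v = gen.generateValue(Bytes.toBytes("row-0"), mobFam, Bytes.toBytes("q0"));
    // v comes from LoadTestKVGenerator and is between 1024 and 10240 bytes long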
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/pom.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/src/main/asciidoc/book.adoc
----------------------------------------------------------------------
[48/50] [abbrv] hbase git commit: Merge branch 'apache/master'
(4/16/15) into hbase-11339
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
index d286b72,0000000..37d4461
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java
@@@ -1,180 -1,0 +1,182 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagType;
++import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
++import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.SweepCounter;
+import org.apache.hadoop.hbase.mob.mapreduce.SweepReducer.SweepPartitionId;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+import org.apache.hadoop.hbase.regionserver.MemStore;
+import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.mapreduce.Reducer.Context;
+
+/**
+ * A wrapper around a DefaultMemStore.
+ * The sweep reducer uses this wrapper to buffer and sort the cells read from
+ * invalid and undersized mob files.
+ * When it fills up it is flushed: the mob data is written out to mob files, and the names of
+ * those files are written back to the store files of HBase.
+ * In a reducer of the sweep tool, mob files are grouped by a common prefix (start key and
+ * date); within each group, the reducer iterates over the files and reads their cells into a
+ * new, bigger mob file.
+ * Cells within a single mob file are ordered, but cells across mob files are not, so this
+ * MemStoreWrapper sorts the cells coming from different mob files before flushing them to
+ * disk; once the memstore is big enough, it is flushed as a new mob file.
+ */
+@InterfaceAudience.Private
+public class MemStoreWrapper {
+
+ private static final Log LOG = LogFactory.getLog(MemStoreWrapper.class);
+
+ private MemStore memstore;
+ private long flushSize;
+ private SweepPartitionId partitionId;
+ private Context context;
+ private Configuration conf;
- private HTable table;
++ private BufferedMutator table;
+ private HColumnDescriptor hcd;
+ private Path mobFamilyDir;
+ private FileSystem fs;
+ private CacheConfig cacheConfig;
+
- public MemStoreWrapper(Context context, FileSystem fs, HTable table, HColumnDescriptor hcd,
++ public MemStoreWrapper(Context context, FileSystem fs, BufferedMutator table, HColumnDescriptor hcd,
+ MemStore memstore, CacheConfig cacheConfig) throws IOException {
+ this.memstore = memstore;
+ this.context = context;
+ this.fs = fs;
+ this.table = table;
+ this.hcd = hcd;
+ this.conf = context.getConfiguration();
+ this.cacheConfig = cacheConfig;
+ flushSize = this.conf.getLong(MobConstants.MOB_SWEEP_TOOL_COMPACTION_MEMSTORE_FLUSH_SIZE,
+ MobConstants.DEFAULT_MOB_SWEEP_TOOL_COMPACTION_MEMSTORE_FLUSH_SIZE);
+ mobFamilyDir = MobUtils.getMobFamilyPath(conf, table.getName(), hcd.getNameAsString());
+ }
+
+ public void setPartitionId(SweepPartitionId partitionId) {
+ this.partitionId = partitionId;
+ }
+
+ /**
+ * Flushes the memstore if the size is large enough.
+ * @throws IOException
+ */
+ private void flushMemStoreIfNecessary() throws IOException {
+ if (memstore.heapSize() >= flushSize) {
+ flushMemStore();
+ }
+ }
+
+ /**
+ * Flushes the memstore regardless of its size.
+ * @throws IOException
+ */
+ public void flushMemStore() throws IOException {
+ MemStoreSnapshot snapshot = memstore.snapshot();
+ internalFlushCache(snapshot);
+ memstore.clearSnapshot(snapshot.getId());
+ }
+
+ /**
+ * Flushes the snapshot of the memstore.
+ * Flushes the mob data to the mob files, and flushes the names of these mob files to HBase.
+ * @param snapshot The snapshot of the memstore.
+ * @throws IOException
+ */
+ private void internalFlushCache(final MemStoreSnapshot snapshot)
+ throws IOException {
+ if (snapshot.getCellsCount() == 0) {
+ return;
+ }
+ // generate the files into a temp directory.
+ String tempPathString = context.getConfiguration().get(SweepJob.WORKING_FILES_DIR_KEY);
+ StoreFile.Writer mobFileWriter = MobUtils.createWriter(conf, fs, hcd,
+ partitionId.getDate(), new Path(tempPathString), snapshot.getCellsCount(),
+ hcd.getCompactionCompression(), partitionId.getStartKey(), cacheConfig);
+
+ String relativePath = mobFileWriter.getPath().getName();
+ LOG.info("Create files under a temp directory " + mobFileWriter.getPath().toString());
+
+ byte[] referenceValue = Bytes.toBytes(relativePath);
+ KeyValueScanner scanner = snapshot.getScanner();
+ Cell cell = null;
+ while (null != (cell = scanner.next())) {
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ mobFileWriter.append(kv);
+ }
+ scanner.close();
+ // Write out the log sequence number that corresponds to this output
+ // hfile. The hfile is current up to and including logCacheFlushId.
+ mobFileWriter.appendMetadata(Long.MAX_VALUE, false, snapshot.getCellsCount());
+ mobFileWriter.close();
+
+ MobUtils.commitFile(conf, fs, mobFileWriter.getPath(), mobFamilyDir, cacheConfig);
+ context.getCounter(SweepCounter.FILE_AFTER_MERGE_OR_CLEAN).increment(1);
+ // write reference/fileName back to the store files of HBase.
+ scanner = snapshot.getScanner();
+ scanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW));
+ cell = null;
- Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, this.table.getTableName());
++ Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE,
++ Bytes.toBytes(this.table.getName().toString()));
+ while (null != (cell = scanner.next())) {
+ KeyValue reference = MobUtils.createMobRefKeyValue(cell, referenceValue, tableNameTag);
+ Put put =
+ new Put(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength());
+ put.add(reference);
- table.put(put);
++ table.mutate(put);
+ context.getCounter(SweepCounter.RECORDS_UPDATED).increment(1);
+ }
- table.flushCommits();
++ table.flush();
+ scanner.close();
+ }
+
+ /**
+ * Adds a KeyValue into the memstore.
+ * @param kv The KeyValue to be added.
+ * @throws IOException
+ */
+ public void addToMemstore(KeyValue kv) throws IOException {
+ memstore.add(kv);
+ // flush the memstore if it's full.
+ flushMemStoreIfNecessary();
+ }
+
+}
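
For context, the substantive client change in this file is the move from HTable's write buffer (put() plus flushCommits()) to the BufferedMutator API (mutate() plus flush()). A minimal standalone sketch of that pattern against the 1.x client follows; the table name "t", family "f", and sample row are placeholders, not values from the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      mutator.mutate(put); // buffered client-side, like the old autoFlush=false
      mutator.flush();     // replaces HTable.flushCommits()
    }
  }
}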
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
index 73ca1a2,0000000..cbefd8a
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
@@@ -1,512 -1,0 +1,509 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.HBaseAdmin;
- import org.apache.hadoop.hbase.client.HTable;
++import org.apache.hadoop.hbase.client.Admin;
++import org.apache.hadoop.hbase.client.BufferedMutator;
++import org.apache.hadoop.hbase.client.BufferedMutatorParams;
++import org.apache.hadoop.hbase.client.Connection;
++import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobFile;
+import org.apache.hadoop.hbase.mob.MobFileName;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.DummyMobAbortable;
+import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.SweepCounter;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * The reducer of a sweep job.
+ * This reducer merges the small mob files into bigger ones and writes the names of the
+ * visited mob files to a sequence file, which the sweep job later uses to delete the
+ * unused mob files.
+ * The input key is a file name; the input value is a collection of KeyValues, where each
+ * KeyValue is the actual cell in HBase (its value has the format valueLength + fileName).
+ * From this input the reducer can tell how many cells still exist in HBase for a mob file.
+ * If existingCellSize/mobFileSize < compactionRatio, the mob file needs to be merged.
+ */
+@InterfaceAudience.Private
+public class SweepReducer extends Reducer<Text, KeyValue, Writable, Writable> {
+
+ private static final Log LOG = LogFactory.getLog(SweepReducer.class);
+
+ private SequenceFile.Writer writer = null;
+ private MemStoreWrapper memstore;
+ private Configuration conf;
+ private FileSystem fs;
+
+ private Path familyDir;
+ private CacheConfig cacheConfig;
+ private long compactionBegin;
- private HTable table;
++ private BufferedMutator table;
+ private HColumnDescriptor family;
+ private long mobCompactionDelay;
+ private Path mobTableDir;
+
+ @Override
+ protected void setup(Context context) throws IOException, InterruptedException {
+ this.conf = context.getConfiguration();
++ Connection c = ConnectionFactory.createConnection(this.conf);
+ this.fs = FileSystem.get(conf);
+ // the MOB_SWEEP_JOB_DELAY is ONE_DAY by default. Its value is only changed when testing.
+ mobCompactionDelay = conf.getLong(SweepJob.MOB_SWEEP_JOB_DELAY, SweepJob.ONE_DAY);
+ String tableName = conf.get(TableInputFormat.INPUT_TABLE);
+ String familyName = conf.get(TableInputFormat.SCAN_COLUMN_FAMILY);
+ TableName tn = TableName.valueOf(tableName);
+ this.familyDir = MobUtils.getMobFamilyPath(conf, tn, familyName);
- HBaseAdmin admin = new HBaseAdmin(this.conf);
++ Admin admin = c.getAdmin();
+ try {
+ family = admin.getTableDescriptor(tn).getFamily(Bytes.toBytes(familyName));
+ if (family == null) {
+ // this column family might be removed, directly return.
+ throw new InvalidFamilyOperationException("Column family '" + familyName
+ + "' does not exist. It might be removed.");
+ }
+ } finally {
+ try {
+ admin.close();
+ } catch (IOException e) {
+ LOG.warn("Fail to close the HBaseAdmin", e);
+ }
+ }
+ // disable the block cache.
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
+ this.cacheConfig = new CacheConfig(copyOfConf);
+
- table = new HTable(this.conf, Bytes.toBytes(tableName));
- table.setAutoFlush(false, false);
-
- table.setWriteBufferSize(1 * 1024 * 1024); // 1MB
++ table = c.getBufferedMutator(new BufferedMutatorParams(tn)
++ .writeBufferSize(1 * 1024 * 1024)); // 1MB write buffer
+ memstore = new MemStoreWrapper(context, fs, table, family, new DefaultMemStore(), cacheConfig);
+
+ // The start time of the sweep tool.
+ // Only the mob files whose creation time is older than startTime-oneDay will be handled by
+ // the reducer, since handling the latest mob files would bring inconsistency.
+ this.compactionBegin = conf.getLong(MobConstants.MOB_SWEEP_TOOL_COMPACTION_START_DATE, 0);
+ mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tn);
+ }
+
+ private SweepPartition createPartition(SweepPartitionId id, Context context) throws IOException {
+ return new SweepPartition(id, context);
+ }
+
+ @Override
+ public void run(Context context) throws IOException, InterruptedException {
+ String jobId = context.getConfiguration().get(SweepJob.SWEEP_JOB_ID);
+ String owner = context.getConfiguration().get(SweepJob.SWEEP_JOB_SERVERNAME);
+ String sweeperNode = context.getConfiguration().get(SweepJob.SWEEP_JOB_TABLE_NODE);
+ ZooKeeperWatcher zkw = new ZooKeeperWatcher(context.getConfiguration(), jobId,
+ new DummyMobAbortable());
+ FSDataOutputStream fout = null;
+ try {
+ SweepJobNodeTracker tracker = new SweepJobNodeTracker(zkw, sweeperNode, owner);
+ tracker.start();
+ setup(context);
+ // create a sequence file that contains all the visited file names in this reducer.
+ String dir = this.conf.get(SweepJob.WORKING_VISITED_DIR_KEY);
+ Path nameFilePath = new Path(dir, UUID.randomUUID().toString()
+ .replace("-", MobConstants.EMPTY_STRING));
+ fout = fs.create(nameFilePath, true);
+ writer = SequenceFile.createWriter(context.getConfiguration(), fout, String.class,
+ String.class, CompressionType.NONE, null);
+ SweepPartitionId id;
+ SweepPartition partition = null;
+ // the mob files which have the same start key and date are in the same partition.
+ while (context.nextKey()) {
+ Text key = context.getCurrentKey();
+ String keyString = key.toString();
+ id = SweepPartitionId.create(keyString);
+ if (null == partition || !id.equals(partition.getId())) {
+ // It's the first mob file of a new partition.
+ if (null != partition) {
+ // this mob file is in a different partition from the previous mob file;
+ // close the previous partition first.
+ partition.close();
+ }
+ // create a new one
+ partition = createPartition(id, context);
+ }
+ if (partition != null) {
+ // run the partition
+ partition.execute(key, context.getValues());
+ }
+ }
+ if (null != partition) {
+ partition.close();
+ }
+ writer.hflush();
+ } catch (KeeperException e) {
+ throw new IOException(e);
+ } finally {
+ cleanup(context);
+ zkw.close();
+ if (writer != null) {
+ IOUtils.closeStream(writer);
+ }
+ if (fout != null) {
+ IOUtils.closeStream(fout);
+ }
+ if (table != null) {
+ try {
+ table.close();
+ } catch (IOException e) {
+ LOG.warn(e);
+ }
+ }
+ }
+
+ }
+
+ /**
+ * The mob files which have the same start key and date are in the same partition.
+ * The files in the same partition are merged together into bigger ones.
+ */
+ public class SweepPartition {
+
+ private final SweepPartitionId id;
+ private final Context context;
+ private boolean memstoreUpdated = false;
+ private boolean mergeSmall = false;
+ private final Map<String, MobFileStatus> fileStatusMap = new HashMap<String, MobFileStatus>();
+ private final List<Path> toBeDeleted = new ArrayList<Path>();
+
+ public SweepPartition(SweepPartitionId id, Context context) throws IOException {
+ this.id = id;
+ this.context = context;
+ memstore.setPartitionId(id);
+ init();
+ }
+
+ public SweepPartitionId getId() {
+ return this.id;
+ }
+
+ /**
+ * Prepares the map of files.
+ *
+ * @throws IOException
+ */
+ private void init() throws IOException {
+ FileStatus[] fileStats = listStatus(familyDir, id.getStartKey());
+ if (null == fileStats) {
+ return;
+ }
+
+ int smallFileCount = 0;
+ float compactionRatio = conf.getFloat(MobConstants.MOB_SWEEP_TOOL_COMPACTION_RATIO,
+ MobConstants.DEFAULT_SWEEP_TOOL_MOB_COMPACTION_RATIO);
+ long compactionMergeableSize = conf.getLong(
+ MobConstants.MOB_SWEEP_TOOL_COMPACTION_MERGEABLE_SIZE,
+ MobConstants.DEFAULT_SWEEP_TOOL_MOB_COMPACTION_MERGEABLE_SIZE);
+ // list the files. Just merge the hfiles, don't merge the hfile links.
+ // prepare the map of mob files. The key is the file name, the value is the file status.
+ for (FileStatus fileStat : fileStats) {
+ MobFileStatus mobFileStatus = null;
+ if (!HFileLink.isHFileLink(fileStat.getPath())) {
+ mobFileStatus = new MobFileStatus(fileStat, compactionRatio, compactionMergeableSize);
+ if (mobFileStatus.needMerge()) {
+ smallFileCount++;
+ }
+ // key is file name (not hfile name), value is hfile status.
+ fileStatusMap.put(fileStat.getPath().getName(), mobFileStatus);
+ }
+ }
+ if (smallFileCount >= 2) {
+ // merge the files only when there is more than one small file in the same partition.
+ this.mergeSmall = true;
+ }
+ }
+
+ /**
+ * Flushes the data into mob files and store files, and archives the small
+ * files after they're merged.
+ * @throws IOException
+ */
+ public void close() throws IOException {
+ if (null == id) {
+ return;
+ }
+ // flush the remaining key values into mob files
+ if (memstoreUpdated) {
+ memstore.flushMemStore();
+ }
+ List<StoreFile> storeFiles = new ArrayList<StoreFile>(toBeDeleted.size());
+ // delete small files after compaction
+ for (Path path : toBeDeleted) {
+ LOG.info("[In Partition close] Delete the file " + path + " in partition close");
+ storeFiles.add(new StoreFile(fs, path, conf, cacheConfig, BloomType.NONE));
+ }
+ if (!storeFiles.isEmpty()) {
+ try {
+ MobUtils.removeMobFiles(conf, fs, table.getName(), mobTableDir, family.getName(),
+ storeFiles);
+ context.getCounter(SweepCounter.FILE_TO_BE_MERGE_OR_CLEAN).increment(storeFiles.size());
+ } catch (IOException e) {
+ LOG.error("Fail to archive the store files " + storeFiles, e);
+ }
+ storeFiles.clear();
+ }
+ fileStatusMap.clear();
+ }
+
+ /**
+ * Merges the small mob files into bigger ones.
+ * @param fileName The current mob file name.
+ * @param values The collection of KeyValues in this mob file.
+ * @throws IOException
+ */
+ public void execute(Text fileName, Iterable<KeyValue> values) throws IOException {
+ if (null == values) {
+ return;
+ }
+ MobFileName mobFileName = MobFileName.create(fileName.toString());
+ LOG.info("[In reducer] The file name: " + fileName.toString());
+ MobFileStatus mobFileStat = fileStatusMap.get(mobFileName.getFileName());
+ if (null == mobFileStat) {
+ LOG.info("[In reducer] Cannot find the file, probably this record is obsolete");
+ return;
+ }
+ // only handle the files that are older than one day.
+ if (compactionBegin - mobFileStat.getFileStatus().getModificationTime()
+ <= mobCompactionDelay) {
+ return;
+ }
+ // write the hfile name
+ writer.append(mobFileName.getFileName(), MobConstants.EMPTY_STRING);
+ Set<KeyValue> kvs = new HashSet<KeyValue>();
+ for (KeyValue kv : values) {
+ if (kv.getValueLength() > Bytes.SIZEOF_INT) {
+ mobFileStat.addValidSize(Bytes.toInt(kv.getValueArray(), kv.getValueOffset(),
+ Bytes.SIZEOF_INT));
+ }
+ kvs.add(kv.createKeyOnly(false));
+ }
+ // If the mob file is an invalid one or a small one, merge it into new/bigger ones.
+ if (mobFileStat.needClean() || (mergeSmall && mobFileStat.needMerge())) {
+ context.getCounter(SweepCounter.INPUT_FILE_COUNT).increment(1);
+ MobFile file = MobFile.create(fs,
+ new Path(familyDir, mobFileName.getFileName()), conf, cacheConfig);
+ StoreFileScanner scanner = null;
+ try {
+ scanner = file.getScanner();
+ scanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY));
+ Cell cell;
+ while (null != (cell = scanner.next())) {
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ KeyValue keyOnly = kv.createKeyOnly(false);
+ if (kvs.contains(keyOnly)) {
+ // write the KeyValue existing in HBase to the memstore.
+ memstore.addToMemstore(kv);
+ memstoreUpdated = true;
+ }
+ }
+ } finally {
+ if (scanner != null) {
+ scanner.close();
+ }
+ }
+ toBeDeleted.add(mobFileStat.getFileStatus().getPath());
+ }
+ }
+
+ /**
+ * Lists the files with the same prefix.
+ * @param p The file path.
+ * @param prefix The prefix.
+ * @return The files with the same prefix.
+ * @throws IOException
+ */
+ private FileStatus[] listStatus(Path p, String prefix) throws IOException {
+ return fs.listStatus(p, new PathPrefixFilter(prefix));
+ }
+ }
+
+ static class PathPrefixFilter implements PathFilter {
+
+ private final String prefix;
+
+ public PathPrefixFilter(String prefix) {
+ this.prefix = prefix;
+ }
+
+ public boolean accept(Path path) {
+ return path.getName().startsWith(prefix, 0);
+ }
+
+ }
+
+ /**
+ * The sweep partition id.
+ * It consists of the start key and date.
+ * The start key is a hex string of the checksum of a region start key.
+ * The date is the latest timestamp of cells in a mob file.
+ */
+ public static class SweepPartitionId {
+ private String date;
+ private String startKey;
+
+ public SweepPartitionId(MobFileName fileName) {
+ this.date = fileName.getDate();
+ this.startKey = fileName.getStartKey();
+ }
+
+ public SweepPartitionId(String date, String startKey) {
+ this.date = date;
+ this.startKey = startKey;
+ }
+
+ public static SweepPartitionId create(String key) {
+ return new SweepPartitionId(MobFileName.create(key));
+ }
+
+ @Override
+ public boolean equals(Object anObject) {
+ if (this == anObject) {
+ return true;
+ }
+ if (anObject instanceof SweepPartitionId) {
+ SweepPartitionId another = (SweepPartitionId) anObject;
+ if (this.date.equals(another.getDate()) && this.startKey.equals(another.getStartKey())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public String getDate() {
+ return this.date;
+ }
+
+ public String getStartKey() {
+ return this.startKey;
+ }
+
+ public void setDate(String date) {
+ this.date = date;
+ }
+
+ public void setStartKey(String startKey) {
+ this.startKey = startKey;
+ }
+ }
+
+ /**
+ * The mob file status used in the sweep reducer.
+ */
+ private static class MobFileStatus {
+ private FileStatus fileStatus;
+ private int validSize;
+ private long size;
+
+ private float compactionRatio = MobConstants.DEFAULT_SWEEP_TOOL_MOB_COMPACTION_RATIO;
+ private long compactionMergeableSize =
+ MobConstants.DEFAULT_SWEEP_TOOL_MOB_COMPACTION_MERGEABLE_SIZE;
+
+ /**
+ * @param fileStatus The current FileStatus.
+ * @param compactionRatio The invalid ratio. If too many cells are deleted in a mob file,
+ * it is regarded as invalid and needs to be rewritten to a new one.
+ * If existingCellSize/fileSize < compactionRatio, the file is regarded as invalid.
+ * @param compactionMergeableSize If the size of a mob file is less than this value,
+ * it is regarded as a small file and needs to be merged.
+ */
+ public MobFileStatus(FileStatus fileStatus, float compactionRatio,
+ long compactionMergeableSize) {
+ this.fileStatus = fileStatus;
+ this.size = fileStatus.getLen();
+ validSize = 0;
+ this.compactionRatio = compactionRatio;
+ this.compactionMergeableSize = compactionMergeableSize;
+ }
+
+ /**
+ * Add size to this file.
+ * @param size The size to be added.
+ */
+ public void addValidSize(int size) {
+ this.validSize += size;
+ }
+
+ /**
+ * Whether the mob file needs to be cleaned.
+ * If too many cells are deleted in this mob file, it needs to be cleaned.
+ * @return True if it needs to be cleaned.
+ */
+ public boolean needClean() {
+ return validSize < compactionRatio * size;
+ }
+
+ /**
+ * Whether the mob file needs to be merged.
+ * If this mob file is too small, it needs to be merged.
+ * @return True if it needs to be merged.
+ */
+ public boolean needMerge() {
+ return this.size < compactionMergeableSize;
+ }
+
+ /**
+ * Gets the file status.
+ * @return The file status.
+ */
+ public FileStatus getFileStatus() {
+ return fileStatus;
+ }
+ }
+}
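
The merge/clean decision in MobFileStatus above reduces to two comparisons: a file is invalid when validSize < compactionRatio * size, and small when size < compactionMergeableSize. A self-contained toy illustration follows; the ratio, threshold, and sizes are made-up demo values, and only the comparisons mirror the class.

public class SweepPredicateDemo {
  public static void main(String[] args) {
    float compactionRatio = 0.5f;             // demo value, not the shipped default
    long mergeableSize = 128L * 1024 * 1024;  // demo threshold (128MB)

    long fileSize = 200L * 1024 * 1024;  // on-disk mob file size
    long validSize = 40L * 1024 * 1024;  // bytes still referenced from HBase

    boolean needClean = validSize < compactionRatio * fileSize; // true: mostly deleted cells
    boolean needMerge = fileSize < mergeableSize;               // false: not a small file

    System.out.println("needClean=" + needClean + ", needMerge=" + needMerge);
  }
}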
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index 73b8cb9,73b8cb9..8ff4840
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@@ -85,7 -85,7 +85,7 @@@ public class DefaultStoreFlusher extend
scanner.close();
}
LOG.info("Flushed, sequenceid=" + cacheFlushId +", memsize="
-- + StringUtils.humanReadableInt(snapshot.getSize()) +
++ + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getSize(), "", 1) +
", hasBloomFilter=" + writer.hasGeneralBloom() +
", into tmp file " + writer.getPath());
result.add(writer.getPath());
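
The one-line change above swaps StringUtils.humanReadableInt, deprecated in recent Hadoop releases, for TraditionalBinaryPrefix.long2String, which renders the same human-readable sizes. A small sketch of the replacement call; the exact output formatting is approximate.

import org.apache.hadoop.util.StringUtils;

public class SizeFormatSketch {
  public static void main(String[] args) {
    long memsize = 128L * 1024 * 1024;
    // long2String(value, unit, decimalPlaces); prints something like "128m"
    System.out.println(StringUtils.TraditionalBinaryPrefix.long2String(memsize, "", 1));
  }
}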
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ab0165d,e082698..6684309
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@@ -3276,34 -3421,12 +3422,30 @@@ public class HRegion implements HeapSiz
Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
- snapshotDir, desc, exnSnare);
+ snapshotDir, desc, exnSnare);
manifest.addRegion(this);
+
+ // The regionserver holding the first region of the table is responsible for taking the
+ // manifest of the mob dir.
- if (!Bytes.equals(getStartKey(), HConstants.EMPTY_START_ROW))
++ if (!Bytes.equals(getRegionInfo().getStartKey(), HConstants.EMPTY_START_ROW))
+ return;
+
+ // if any column family is mob-enabled, add the "mob region" to the manifest.
- Map<byte[], Store> stores = getStores();
- for (Entry<byte[], Store> store : stores.entrySet()) {
- boolean hasMobStore = store.getValue().getFamily().isMobEnabled();
++ List<Store> stores = getStores();
++ for (Store store : stores) {
++ boolean hasMobStore = store.getFamily().isMobEnabled();
+ if (hasMobStore) {
+ // use ".mob" as the start key and 0 as the region id
+ HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(this.getTableDesc().getTableName());
+ mobRegionInfo.setOffline(true);
+ manifest.addMobRegion(mobRegionInfo, this.getTableDesc().getColumnFamilies());
+ return;
+ }
+ }
}
- /**
- * Replaces any KV timestamps set to {@link HConstants#LATEST_TIMESTAMP} with the
- * provided current timestamp.
- * @throws IOException
- */
- void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now)
+ @Override
+ public void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now)
throws IOException {
for (List<Cell> cells: cellItr) {
if (cells == null) continue;
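
The snapshot hunk above encodes a simple rule: only the region whose start key equals the table's empty first key adds the synthetic mob region to the manifest, and only if at least one column family is mob-enabled, so the mob directory is recorded exactly once per table. A hypothetical standalone rendering of that guard follows; the real method inspects Store objects rather than a list of flags.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class MobManifestRule {
  // true if this region should add the "mob region" to the snapshot manifest
  static boolean shouldAddMobRegion(byte[] regionStartKey, List<Boolean> familyIsMob) {
    if (!Bytes.equals(regionStartKey, HConstants.EMPTY_START_ROW)) {
      return false; // only the first region of the table is responsible
    }
    for (boolean mob : familyIsMob) {
      if (mob) {
        return true; // at least one mob-enabled column family
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(shouldAddMobRegion(HConstants.EMPTY_START_ROW,
        Arrays.asList(false, true))); // prints true
  }
}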
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 159ec55,8f7dee4..ea9558f
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@@ -549,27 -461,19 +560,28 @@@ class MetricsRegionServerWrapperImp
long tempFlushedCellsSize = 0;
long tempCompactedCellsSize = 0;
long tempMajorCompactedCellsSize = 0;
+ long tempMobCompactedIntoMobCellsCount = 0;
+ long tempMobCompactedFromMobCellsCount = 0;
+ long tempMobCompactedIntoMobCellsSize = 0;
+ long tempMobCompactedFromMobCellsSize = 0;
+ long tempMobFlushCount = 0;
+ long tempMobFlushedCellsCount = 0;
+ long tempMobFlushedCellsSize = 0;
+ long tempMobScanCellsCount = 0;
+ long tempMobScanCellsSize = 0;
long tempBlockedRequestsCount = 0L;
- for (HRegion r : regionServer.getOnlineRegionsLocalContext()) {
- tempNumMutationsWithoutWAL += r.numMutationsWithoutWAL.get();
- tempDataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
- tempReadRequestsCount += r.readRequestsCount.get();
- tempWriteRequestsCount += r.writeRequestsCount.get();
- tempCheckAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
- tempCheckAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
+ for (Region r : regionServer.getOnlineRegionsLocalContext()) {
+ tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL();
+ tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL();
+ tempReadRequestsCount += r.getReadRequestsCount();
+ tempWriteRequestsCount += r.getWriteRequestsCount();
+ tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed();
+ tempCheckAndMutateChecksPassed += r.getCheckAndMutateChecksPassed();
tempBlockedRequestsCount += r.getBlockedRequestsCount();
- tempNumStores += r.stores.size();
- for (Store store : r.stores.values()) {
+ List<Store> storeList = r.getStores();
+ tempNumStores += storeList.size();
+ for (Store store : storeList) {
tempNumStoreFiles += store.getStorefilesCount();
tempMemstoreSize += store.getMemStoreSize();
tempStoreFileSize += store.getStorefilesSize();
@@@ -582,21 -486,13 +594,25 @@@
tempFlushedCellsSize += store.getFlushedCellsSize();
tempCompactedCellsSize += store.getCompactedCellsSize();
tempMajorCompactedCellsSize += store.getMajorCompactedCellsSize();
+ if (store instanceof HMobStore) {
+ HMobStore mobStore = (HMobStore) store;
+ tempMobCompactedIntoMobCellsCount += mobStore.getMobCompactedIntoMobCellsCount();
+ tempMobCompactedFromMobCellsCount += mobStore.getMobCompactedFromMobCellsCount();
+ tempMobCompactedIntoMobCellsSize += mobStore.getMobCompactedIntoMobCellsSize();
+ tempMobCompactedFromMobCellsSize += mobStore.getMobCompactedFromMobCellsSize();
+ tempMobFlushCount += mobStore.getMobFlushCount();
+ tempMobFlushedCellsCount += mobStore.getMobFlushedCellsCount();
+ tempMobFlushedCellsSize += mobStore.getMobFlushedCellsSize();
+ tempMobScanCellsCount += mobStore.getMobScanCellsCount();
+ tempMobScanCellsSize += mobStore.getMobScanCellsSize();
+ }
}
- hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution());
+ HDFSBlocksDistribution distro = r.getHDFSBlocksDistribution();
+ hdfsBlocksDistribution.add(distro);
+ if (r.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
+ hdfsBlocksDistributionSecondaryRegions.add(distro);
+ }
}
float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
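
The metrics hunk follows one pattern throughout: walk every store of every online region, accumulate the common counters, and additionally read the mob counters when a store is an HMobStore. A toy, self-contained model of that instanceof-based aggregation follows; the Store and MobStore types below are stand-ins, not the real HBase classes.

import java.util.Arrays;
import java.util.List;

public class MobMetricsAggregationDemo {
  interface Store { long getMemStoreSize(); }
  static class PlainStore implements Store {
    public long getMemStoreSize() { return 100; }
  }
  static class MobStore implements Store {
    public long getMemStoreSize() { return 200; }
    long getMobFlushCount() { return 7; }
  }

  public static void main(String[] args) {
    List<Store> stores = Arrays.asList(new PlainStore(), new MobStore());
    long memstoreSize = 0;
    long mobFlushes = 0;
    for (Store s : stores) {
      memstoreSize += s.getMemStoreSize(); // common counter, read from every store
      if (s instanceof MobStore) {         // mob counters only exist on mob stores
        mobFlushes += ((MobStore) s).getMobFlushCount();
      }
    }
    System.out.println("memstoreSize=" + memstoreSize + " mobFlushes=" + mobFlushes);
  }
}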
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
index f7f0acd,0000000..5739df1
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
@@@ -1,80 -1,0 +1,80 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.NavigableSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.mob.MobUtils;
+
+/**
+ * Scanner that scans both the memstore and the MOB store, coalescing the KeyValue stream
+ * into a List<KeyValue> for a single row.
+ */
+@InterfaceAudience.Private
+public class MobStoreScanner extends StoreScanner {
+
+ private boolean cacheMobBlocks = false;
+ private final HMobStore mobStore;
+
+ public MobStoreScanner(Store store, ScanInfo scanInfo, Scan scan,
+ final NavigableSet<byte[]> columns, long readPt) throws IOException {
+ super(store, scanInfo, scan, columns, readPt);
+ cacheMobBlocks = MobUtils.isCacheMobBlocks(scan);
+ if (!(store instanceof HMobStore)) {
+ throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
+ }
+ mobStore = (HMobStore) store;
+ }
+
+ /**
+ * First reads the cells from HBase. If a cell is a reference cell (i.e. it carries the
+ * reference tag), the scanner seeks the actual cell in the mob file and uses the cell
+ * found there as the result.
+ */
+ @Override
- public boolean next(List<Cell> outResult, int limit) throws IOException {
- boolean result = super.next(outResult, limit);
++ public boolean next(List<Cell> outResult, ScannerContext ctx) throws IOException {
++ boolean result = super.next(outResult, ctx);
+ if (!MobUtils.isRawMobScan(scan)) {
+ // retrieve the mob data
+ if (outResult.isEmpty()) {
+ return result;
+ }
+ long mobKVCount = 0;
+ long mobKVSize = 0;
+ for (int i = 0; i < outResult.size(); i++) {
+ Cell cell = outResult.get(i);
+ if (MobUtils.isMobReferenceCell(cell)) {
+ Cell mobCell = mobStore.resolve(cell, cacheMobBlocks);
+ mobKVCount++;
+ mobKVSize += mobCell.getValueLength();
+ outResult.set(i, mobCell);
+ }
+ }
+ mobStore.updateMobScanCellsCount(mobKVCount);
+ mobStore.updateMobScanCellsSize(mobKVSize);
+ }
+ return result;
+ }
+}
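
The in-place substitution loop above is the heart of mob resolution: reference cells are swapped for the cells fetched from the mob files, preserving list order. A toy, self-contained model of that loop follows; plain strings stand in for HBase Cells, and the prefix convention is invented for the demo.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MobResolveDemo {
  static boolean isMobReference(String cell) { return cell.startsWith("ref:"); }
  static String resolve(String refCell) { return "mob-value-for-" + refCell.substring(4); }

  public static void main(String[] args) {
    List<String> outResult = new ArrayList<String>(Arrays.asList("plain:a", "ref:b", "ref:c"));
    for (int i = 0; i < outResult.size(); i++) {
      String cell = outResult.get(i);
      if (isMobReference(cell)) {
        outResult.set(i, resolve(cell)); // replace the reference with the resolved value
      }
    }
    System.out.println(outResult); // [plain:a, mob-value-for-b, mob-value-for-c]
  }
}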
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
index 4c46218,0000000..85be382
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java
@@@ -1,80 -1,0 +1,80 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.NavigableSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.mob.MobUtils;
+
+/**
+ * ReversedMobStoreScanner extends ReversedStoreScanner and is used to support
+ * reversed scanning in both the memstore and the MOB store.
+ */
+@InterfaceAudience.Private
+public class ReversedMobStoreScanner extends ReversedStoreScanner {
+
+ private boolean cacheMobBlocks = false;
+ protected final HMobStore mobStore;
+
+ ReversedMobStoreScanner(Store store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns,
+ long readPt) throws IOException {
+ super(store, scanInfo, scan, columns, readPt);
+ cacheMobBlocks = MobUtils.isCacheMobBlocks(scan);
+ if (!(store instanceof HMobStore)) {
+ throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
+ }
+ mobStore = (HMobStore) store;
+ }
+
+ /**
+ * First reads the cells from HBase. If a cell is a reference cell (i.e. it carries the
+ * reference tag), the scanner seeks the actual cell in the mob file and uses the cell
+ * found there as the result.
+ */
+ @Override
- public boolean next(List<Cell> outResult, int limit) throws IOException {
- boolean result = super.next(outResult, limit);
++ public boolean next(List<Cell> outResult, ScannerContext ctx) throws IOException {
++ boolean result = super.next(outResult, ctx);
+ if (!MobUtils.isRawMobScan(scan)) {
+ // retrieve the mob data
+ if (outResult.isEmpty()) {
+ return result;
+ }
+ long mobKVCount = 0;
+ long mobKVSize = 0;
+ for (int i = 0; i < outResult.size(); i++) {
+ Cell cell = outResult.get(i);
+ if (MobUtils.isMobReferenceCell(cell)) {
+ Cell mobCell = mobStore.resolve(cell, cacheMobBlocks);
+ mobKVCount++;
+ mobKVSize += mobCell.getValueLength();
+ outResult.set(i, mobCell);
+ }
+ }
+ mobStore.updateMobScanCellsCount(mobKVCount);
+ mobStore.updateMobScanCellsSize(mobKVSize);
+ }
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java
index 27d53ba,0000000..60fc0ff
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java
@@@ -1,251 -1,0 +1,252 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test clone snapshots from the client
+ */
+@Category({LargeTests.class, ClientTests.class})
+public class TestMobCloneSnapshotFromClient {
+ final Log LOG = LogFactory.getLog(getClass());
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ private final byte[] FAMILY = Bytes.toBytes("cf");
+
+ private byte[] emptySnapshot;
+ private byte[] snapshotName0;
+ private byte[] snapshotName1;
+ private byte[] snapshotName2;
+ private int snapshot0Rows;
+ private int snapshot1Rows;
+ private TableName tableName;
+ private Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+ TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
+ TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+ TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+ TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+ TEST_UTIL.getConfiguration().setBoolean(
+ "hbase.master.enabletable.roundrobin", true);
+ TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ TEST_UTIL.startMiniCluster(3);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * Initialize the tests with a table filled with some data
+ * and two snapshots (snapshotName0, snapshotName1) of different states.
+ * The tableName, snapshotNames and the number of rows in the snapshot are initialized.
+ */
+ @Before
+ public void setup() throws Exception {
+ this.admin = TEST_UTIL.getHBaseAdmin();
+
+ long tid = System.currentTimeMillis();
+ tableName = TableName.valueOf("testtb-" + tid);
+ emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+ snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+ snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+ snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
+
+ // create Table and disable it
+ MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
+ admin.disableTable(tableName);
+
+ // take an empty snapshot
+ admin.snapshot(emptySnapshot, tableName);
+
- HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
++ Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
++ Table table = c.getTable(tableName);
+ try {
+ // enable table and insert data
+ admin.enableTable(tableName);
+ SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
+ snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table);
+ admin.disableTable(tableName);
+
+ // take a snapshot
+ admin.snapshot(snapshotName0, tableName);
+
+ // enable table and insert more data
+ admin.enableTable(tableName);
+ SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
+ snapshot1Rows = MobSnapshotTestingUtils.countMobRows(table);
+ admin.disableTable(tableName);
+
+ // take a snapshot of the updated table
+ admin.snapshot(snapshotName1, tableName);
+
+ // re-enable table
+ admin.enableTable(tableName);
+ } finally {
+ table.close();
+ }
+ }
+
+ protected int getNumReplicas() {
+ return 1;
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (admin.tableExists(tableName)) {
+ TEST_UTIL.deleteTable(tableName);
+ }
+ SnapshotTestingUtils.deleteAllSnapshots(admin);
+ SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
+ }
+
+ @Test(expected=SnapshotDoesNotExistException.class)
+ public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
+ String snapshotName = "random-snapshot-" + System.currentTimeMillis();
+ TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName, tableName);
+ }
+
+ @Test(expected = NamespaceNotFoundException.class)
+ public void testCloneOnMissingNamespace() throws IOException, InterruptedException {
+ TableName clonedTableName = TableName.valueOf("unknownNS:clonetb");
+ admin.cloneSnapshot(snapshotName1, clonedTableName);
+ }
+
+ @Test
+ public void testCloneSnapshot() throws IOException, InterruptedException {
+ TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+ testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
+ testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
+ testCloneSnapshot(clonedTableName, emptySnapshot, 0);
+ }
+
+ private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName,
+ int snapshotRows) throws IOException, InterruptedException {
+ // create a new table from snapshot
+ admin.cloneSnapshot(snapshotName, tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshotRows);
+
+ verifyReplicasCameOnline(tableName);
+ TEST_UTIL.deleteTable(tableName);
+ }
+
+ protected void verifyReplicasCameOnline(TableName tableName) throws IOException {
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+ }
+
+ @Test
+ public void testCloneSnapshotCrossNamespace() throws IOException, InterruptedException {
+ String nsName = "testCloneSnapshotCrossNamespace";
+ admin.createNamespace(NamespaceDescriptor.create(nsName).build());
+ TableName clonedTableName =
+ TableName.valueOf(nsName, "clonedtb-" + System.currentTimeMillis());
+ testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
+ testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
+ testCloneSnapshot(clonedTableName, emptySnapshot, 0);
+ }
+
+ /**
+ * Verify that tables created from the snapshot are still alive after source table deletion.
+ */
+ @Test
+ public void testCloneLinksAfterDelete() throws IOException, InterruptedException {
+ // Clone a table from the first snapshot
+ TableName clonedTableName = TableName.valueOf("clonedtb1-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName0, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
+
+ // Take a snapshot of this cloned table.
+ admin.disableTable(clonedTableName);
+ admin.snapshot(snapshotName2, clonedTableName);
+
+ // Clone the snapshot of the cloned table
+ TableName clonedTableName2 = TableName.valueOf("clonedtb2-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName2, clonedTableName2);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
+ admin.disableTable(clonedTableName2);
+
+ // Remove the original table
+ TEST_UTIL.deleteTable(tableName);
+ waitCleanerRun();
+
+ // Verify the first cloned table
+ admin.enableTable(clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
+
+ // Verify the second cloned table
+ admin.enableTable(clonedTableName2);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
+ admin.disableTable(clonedTableName2);
+
+ // Delete the first cloned table
+ TEST_UTIL.deleteTable(clonedTableName);
+ waitCleanerRun();
+
+ // Verify the second cloned table
+ admin.enableTable(clonedTableName2);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
+
+ // Clone a new table from cloned
+ TableName clonedTableName3 = TableName.valueOf("clonedtb3-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName2, clonedTableName3);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows);
+
+ // Delete the cloned tables
+ TEST_UTIL.deleteTable(clonedTableName2);
+ TEST_UTIL.deleteTable(clonedTableName3);
+ admin.deleteSnapshot(snapshotName2);
+ }
+
+ // ==========================================================================
+ // Helpers
+ // ==========================================================================
+
+ private void waitCleanerRun() throws InterruptedException {
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
+ }
+}
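
The setup() change above replaces the deprecated new HTable(conf, tableName) constructor with a Connection obtained from ConnectionFactory; note the test closes only the Table. A sketch of the same migration with both resources managed by try-with-resources, using a placeholder table name rather than the test's generated one.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class TableMigrationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources closes both the Table and the Connection; the test
    // above closes only the Table, so its Connection lives until the JVM exits.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb"))) {
      // read/write through the Table here
    }
  }
}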
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java
index 0bb498d,0000000..6fc2d28
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java
@@@ -1,304 -1,0 +1,306 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test restore snapshots from the client
+ */
+@Category({ClientTests.class, LargeTests.class})
+public class TestMobRestoreSnapshotFromClient {
+ final Log LOG = LogFactory.getLog(getClass());
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ private final byte[] FAMILY = Bytes.toBytes("cf");
+
+ private byte[] emptySnapshot;
+ private byte[] snapshotName0;
+ private byte[] snapshotName1;
+ private byte[] snapshotName2;
+ private int snapshot0Rows;
+ private int snapshot1Rows;
+ private TableName tableName;
+ private Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+ TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
+ TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+ TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+ TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+ TEST_UTIL.getConfiguration().setBoolean(
+ "hbase.master.enabletable.roundrobin", true);
+ TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ TEST_UTIL.startMiniCluster(3);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * Initialize the tests with a table filled with some data
+ * and two snapshots (snapshotName0, snapshotName1) of different states.
+ * The tableName, snapshotNames and the number of rows in the snapshot are initialized.
+ */
+ @Before
+ public void setup() throws Exception {
+ this.admin = TEST_UTIL.getHBaseAdmin();
+
+ long tid = System.currentTimeMillis();
+ tableName =
+ TableName.valueOf("testtb-" + tid);
+ emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
+ snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+ snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+ snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
+
+ // create Table and disable it
+ MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
+
+ admin.disableTable(tableName);
+
+ // take an empty snapshot
+ admin.snapshot(emptySnapshot, tableName);
+
- HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
++ Table table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getTable(tableName);
+ // enable table and insert data
+ admin.enableTable(tableName);
+ SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
+ snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table);
+ admin.disableTable(tableName);
+
+ // take a snapshot
+ admin.snapshot(snapshotName0, tableName);
+
+ // enable table and insert more data
+ admin.enableTable(tableName);
+ SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
+ snapshot1Rows = MobSnapshotTestingUtils.countMobRows(table);
+ table.close();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ TEST_UTIL.deleteTable(tableName);
+ SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
+ }
+
+ @Test
+ public void testRestoreSnapshot() throws IOException {
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot1Rows);
+ admin.disableTable(tableName);
+ admin.snapshot(snapshotName1, tableName);
+ // Restore from snapshot-0
+ admin.restoreSnapshot(snapshotName0);
+ admin.enableTable(tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot0Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+ // Restore from emptySnapshot
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(emptySnapshot);
+ admin.enableTable(tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, 0);
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+ // Restore from snapshot-1
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName1);
+ admin.enableTable(tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot1Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+
+ // Restore from snapshot-1
+ TEST_UTIL.deleteTable(tableName);
+ admin.restoreSnapshot(snapshotName1);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot1Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+ }
+
+ protected int getNumReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void testRestoreSchemaChange() throws Exception {
+ byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
+
- HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
++ Table table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getTable(tableName);
+
+ // Add one column family and put some data in it
+ admin.disableTable(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ admin.addColumn(tableName, hcd);
+ admin.enableTable(tableName);
+ assertEquals(2, table.getTableDescriptor().getFamilies().size());
+ HTableDescriptor htd = admin.getTableDescriptor(tableName);
+ assertEquals(2, htd.getFamilies().size());
+ SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, TEST_FAMILY2);
+ long snapshot2Rows = snapshot1Rows + 500;
+ assertEquals(snapshot2Rows, MobSnapshotTestingUtils.countMobRows(table));
+ assertEquals(500, MobSnapshotTestingUtils.countMobRows(table, TEST_FAMILY2));
+ Set<String> fsFamilies = getFamiliesFromFS(tableName);
+ assertEquals(2, fsFamilies.size());
+
+ // Take a snapshot
+ admin.disableTable(tableName);
+ admin.snapshot(snapshotName2, tableName);
+
+ // Restore the snapshot (without the cf)
+ admin.restoreSnapshot(snapshotName0);
+ admin.enableTable(tableName);
+ assertEquals(1, table.getTableDescriptor().getFamilies().size());
+ try {
+ MobSnapshotTestingUtils.countMobRows(table, TEST_FAMILY2);
+ fail("family '" + Bytes.toString(TEST_FAMILY2) + "' should not exists");
+ } catch (NoSuchColumnFamilyException e) {
+ // expected
+ }
+ assertEquals(snapshot0Rows, MobSnapshotTestingUtils.countMobRows(table));
+ htd = admin.getTableDescriptor(tableName);
+ assertEquals(1, htd.getFamilies().size());
+ fsFamilies = getFamiliesFromFS(tableName);
+ assertEquals(1, fsFamilies.size());
+
+ // Restore back the snapshot (with the cf)
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName2);
+ admin.enableTable(tableName);
+ htd = admin.getTableDescriptor(tableName);
+ assertEquals(2, htd.getFamilies().size());
+ assertEquals(2, table.getTableDescriptor().getFamilies().size());
+ assertEquals(500, MobSnapshotTestingUtils.countMobRows(table, TEST_FAMILY2));
+ assertEquals(snapshot2Rows, MobSnapshotTestingUtils.countMobRows(table));
+ fsFamilies = getFamiliesFromFS(tableName);
+ assertEquals(2, fsFamilies.size());
+ table.close();
+ }
+
+ @Test
+ public void testCloneSnapshotOfCloned() throws IOException, InterruptedException {
+ TableName clonedTableName =
+ TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName0, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
+ admin.disableTable(clonedTableName);
+ admin.snapshot(snapshotName2, clonedTableName);
+ TEST_UTIL.deleteTable(clonedTableName);
+ waitCleanerRun();
+
+ admin.cloneSnapshot(snapshotName2, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
+ TEST_UTIL.deleteTable(clonedTableName);
+ }
+
+ @Test
+ public void testCloneAndRestoreSnapshot() throws IOException, InterruptedException {
+ TEST_UTIL.deleteTable(tableName);
+ waitCleanerRun();
+
+ admin.cloneSnapshot(snapshotName0, tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot0Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+ waitCleanerRun();
+
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName0);
+ admin.enableTable(tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot0Rows);
+ SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
+ }
+
+ @Test
+ public void testCorruptedSnapshot() throws IOException, InterruptedException {
+ SnapshotTestingUtils.corruptSnapshot(TEST_UTIL, Bytes.toString(snapshotName0));
+ TableName cloneName = TableName.valueOf("corruptedClone-" + System.currentTimeMillis());
+ try {
+ admin.cloneSnapshot(snapshotName0, cloneName);
+ fail("Expected CorruptedSnapshotException, got succeeded cloneSnapshot()");
+ } catch (CorruptedSnapshotException e) {
+ // Got the expected corruption exception.
+ // check for no references of the cloned table.
+ assertFalse(admin.tableExists(cloneName));
+ } catch (Exception e) {
+ fail("Expected CorruptedSnapshotException got: " + e);
+ }
+ }
+
+ // ==========================================================================
+ // Helpers
+ // ==========================================================================
+ private void waitCleanerRun() throws InterruptedException {
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
+ }
+
+ private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
+ MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+ Set<String> families = new HashSet<String>();
+ Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
+ for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
+ for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
+ families.add(familyDir.getName());
+ }
+ }
+ return families;
+ }
+}
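Aside: the restore cycle this test exercises reduces to a short sequence of Admin calls.
A minimal sketch, assuming an existing Admin handle, table, and snapshot name (all names
illustrative, not part of the diff):

  // Sketch: snapshot a table, then roll it back to that snapshot.
  void snapshotAndRestore(Admin admin, TableName tableName, byte[] snapshotName)
      throws IOException {
    admin.disableTable(tableName);            // offline snapshot requires a disabled table
    admin.snapshot(snapshotName, tableName);
    admin.enableTable(tableName);             // resume writes; new data accumulates
    // ... later, roll the table back to the snapshot state:
    admin.disableTable(tableName);            // restore also requires a disabled table
    admin.restoreSnapshot(snapshotName);
    admin.enableTable(tableName);
  }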
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
index 612b98a,0000000..a2cd51c
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java
@@@ -1,376 -1,0 +1,375 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test to verify that the cloned table is independent of the table from which it was cloned
+ */
+@Category(LargeTests.class)
+public class TestMobSnapshotCloneIndependence {
+ private static final Log LOG = LogFactory.getLog(TestMobSnapshotCloneIndependence.class);
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static final int NUM_RS = 2;
+ private static final String STRING_TABLE_NAME = "test";
+ private static final String TEST_FAM_STR = "fam";
+ private static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
+ private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
+
+ /**
+ * Setup the config for the cluster and start it
+ * @throws Exception on failure
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(NUM_RS);
+ }
+
+ private static void setupConf(Configuration conf) {
+ // enable snapshot support
+ conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ // disable the ui
+ conf.setInt("hbase.regionsever.info.port", -1);
+ // change the flush size to a small amount, regulating number of store files
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // so make sure we get a compaction when doing a load, but keep around
+ // some files in the store
+ conf.setInt("hbase.hstore.compaction.min", 10);
+ conf.setInt("hbase.hstore.compactionThreshold", 10);
+ // block writes if we get to 12 store files
+ conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+ conf.setInt("hbase.regionserver.msginterval", 100);
+ conf.setBoolean("hbase.master.enabletable.roundrobin", true);
+ // Avoid potentially aggressive splitting which would cause snapshot to fail
+ conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+ ConstantSizeRegionSplitPolicy.class.getName());
+ conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ MobSnapshotTestingUtils.createMobTable(UTIL, TableName.valueOf(STRING_TABLE_NAME), TEST_FAM);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ UTIL.deleteTable(TABLE_NAME);
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ /**
+ * Verify that adding data to the cloned table will not affect the original, and vice-versa when
+ * it is taken as an online snapshot.
+ */
+ @Test (timeout=300000)
+ public void testOnlineSnapshotAppendIndependent() throws Exception {
+ runTestSnapshotAppendIndependent(true);
+ }
+
+ /**
+ * Verify that adding data to the cloned table will not affect the original, and vice-versa when
+ * it is taken as an offline snapshot.
+ */
+ @Test (timeout=300000)
+ public void testOfflineSnapshotAppendIndependent() throws Exception {
+ runTestSnapshotAppendIndependent(false);
+ }
+
+ /**
+ * Verify that adding metadata to the cloned table will not affect the original, and vice-versa
+ * when it is taken as an online snapshot.
+ */
+ @Test (timeout=300000)
+ public void testOnlineSnapshotMetadataChangesIndependent() throws Exception {
+ runTestSnapshotMetadataChangesIndependent(true);
+ }
+
+ /**
+ * Verify that adding metadata to the cloned table will not affect the original, and vice-versa
+ * when it is taken as an offline snapshot.
+ */
+ @Test (timeout=300000)
+ public void testOfflineSnapshotMetadataChangesIndependent() throws Exception {
+ runTestSnapshotMetadataChangesIndependent(false);
+ }
+
+ /**
+ * Verify that region operations, in this case splitting a region, are independent between the
+ * cloned table and the original.
+ */
+ @Test (timeout=300000)
+ public void testOfflineSnapshotRegionOperationsIndependent() throws Exception {
+ runTestRegionOperationsIndependent(false);
+ }
+
+ /**
+ * Verify that region operations, in this case splitting a region, are independent between the
+ * cloned table and the original.
+ */
+ @Test (timeout=300000)
+ public void testOnlineSnapshotRegionOperationsIndependent() throws Exception {
+ runTestRegionOperationsIndependent(true);
+ }
+
+ private static void waitOnSplit(final HTable t, int originalCount) throws Exception {
+ for (int i = 0; i < 200; i++) {
+ try {
+ Thread.sleep(50);
+ } catch (InterruptedException e) {
+ // Restore the interrupted status
+ Thread.currentThread().interrupt();
+ }
+ if (t.getRegionLocations().size() > originalCount) {
+ return;
+ }
+ }
+ throw new Exception("Split did not increase the number of regions");
+ }
+
+ /*
+ * Take a snapshot of a table, add data, and verify that this only
+ * affects one table
+ * @param online - Whether the table is online or not during the snapshot
+ */
+ private void runTestSnapshotAppendIndependent(boolean online) throws Exception {
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+
+ Admin admin = UTIL.getHBaseAdmin();
+ final long startTime = System.currentTimeMillis();
+ final TableName localTableName =
+ TableName.valueOf(STRING_TABLE_NAME + startTime);
+
- HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
++ Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
+ try {
+
+ SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
+ final int origTableRowCount = MobSnapshotTestingUtils.countMobRows(original);
+
+ // Take a snapshot
+ final String snapshotNameAsString = "snapshot_" + localTableName;
+ byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
+
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
+ snapshotNameAsString, rootDir, fs, online);
+
+ if (!online) {
+ admin.enableTable(localTableName);
+ }
+ TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
+ admin.cloneSnapshot(snapshotName, cloneTableName);
+
- HTable clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName);
++ Table clonedTable = ConnectionFactory.createConnection(UTIL.getConfiguration())
++ .getTable(cloneTableName);
+
+ try {
+ final int clonedTableRowCount = MobSnapshotTestingUtils.countMobRows(clonedTable);
+
+ Assert.assertEquals(
+ "The line counts of original and cloned tables do not match after clone. ",
+ origTableRowCount, clonedTableRowCount);
+
+ // Attempt to add data to the test
+ final String rowKey = "new-row-" + System.currentTimeMillis();
+
+ Put p = new Put(Bytes.toBytes(rowKey));
+ p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
+ original.put(p);
- original.flushCommits();
+
+ // Verify the put landed in the original table and did not leak into the clone
+ Assert.assertEquals("The row count of the original table was not modified by the put",
+ origTableRowCount + 1, MobSnapshotTestingUtils.countMobRows(original));
+ Assert.assertEquals(
+ "The row count of the cloned table changed as a result of addition to the original",
+ clonedTableRowCount, MobSnapshotTestingUtils.countMobRows(clonedTable));
+
+ p = new Put(Bytes.toBytes(rowKey));
- p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
++ p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
+ clonedTable.put(p);
- clonedTable.flushCommits();
+
+ // Verify the put to the clone did not affect the original table's row count
+ Assert.assertEquals(
+ "The row count of the original table was modified by the put to the clone",
+ origTableRowCount + 1, MobSnapshotTestingUtils.countMobRows(original));
+ Assert.assertEquals("The row count of the cloned table was not modified by the put",
+ clonedTableRowCount + 1, MobSnapshotTestingUtils.countMobRows(clonedTable));
+ } finally {
+
+ clonedTable.close();
+ }
+ } finally {
+
+ original.close();
+ }
+ }
+
+ /*
+ * Take a snapshot of a table, do a split, and verify that this only affects one table
+ * @param online - Whether the table is online or not during the snapshot
+ */
+ private void runTestRegionOperationsIndependent(boolean online) throws Exception {
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+
+ // Create a table
+ Admin admin = UTIL.getHBaseAdmin();
+ final long startTime = System.currentTimeMillis();
+ final TableName localTableName =
+ TableName.valueOf(STRING_TABLE_NAME + startTime);
- HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
++ Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
+ SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
+ final int loadedTableCount = MobSnapshotTestingUtils.countMobRows(original);
+ System.out.println("Original table has: " + loadedTableCount + " rows");
+
+ final String snapshotNameAsString = "snapshot_" + localTableName;
+
+ // Create a snapshot
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
+ snapshotNameAsString, rootDir, fs, online);
+
+ if (!online) {
+ admin.enableTable(localTableName);
+ }
+
+ TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
+
+ // Clone the snapshot
+ byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
+ admin.cloneSnapshot(snapshotName, cloneTableName);
+
+ // Verify that region information is the same pre-split
- original.clearRegionCache();
++ ((HTable)original).clearRegionCache();
+ List<HRegionInfo> originalTableHRegions = admin.getTableRegions(localTableName);
+
+ final int originalRegionCount = originalTableHRegions.size();
+ final int cloneTableRegionCount = admin.getTableRegions(cloneTableName).size();
+ Assert.assertEquals(
+ "The number of regions in the cloned table is different than in the original table.",
+ originalRegionCount, cloneTableRegionCount);
+
+ // Split a region on the parent table
+ admin.splitRegion(originalTableHRegions.get(0).getRegionName());
- waitOnSplit(original, originalRegionCount);
++ waitOnSplit((HTable)original, originalRegionCount);
+
+ // Verify that the cloned table region is not split
+ final int cloneTableRegionCount2 = admin.getTableRegions(cloneTableName).size();
+ Assert.assertEquals(
+ "The number of regions in the cloned table changed though none of its regions were split.",
+ cloneTableRegionCount, cloneTableRegionCount2);
+ }
+
+ /*
+ * Take a snapshot of a table, add metadata, and verify that this only
+ * affects one table
+ * @param online - Whether the table is online or not during the snapshot
+ */
+ private void runTestSnapshotMetadataChangesIndependent(boolean online) throws Exception {
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+
+ // Create a table
+ Admin admin = UTIL.getHBaseAdmin();
+ final long startTime = System.currentTimeMillis();
+ final TableName localTableName =
+ TableName.valueOf(STRING_TABLE_NAME + startTime);
- HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
++ Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
+ SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
+
+ final String snapshotNameAsString = "snapshot_" + localTableName;
+
+ // Create a snapshot
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
+ snapshotNameAsString, rootDir, fs, online);
+
+ if (!online) {
+ admin.enableTable(localTableName);
+ }
+ TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
+
+ // Clone the snapshot
+ byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
+ admin.cloneSnapshot(snapshotName, cloneTableName);
+
+ // Add a new column family to the original table
+ byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
+ HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2);
+
+ admin.disableTable(localTableName);
+ admin.addColumn(localTableName, hcd);
+
+ // Verify that it is not in the snapshot
+ admin.enableTable(localTableName);
+
+ // get a description of the cloned table
+ // get a list of its families
+ // assert that the family is there
+ HTableDescriptor originalTableDescriptor = original.getTableDescriptor();
+ HTableDescriptor clonedTableDescriptor = admin.getTableDescriptor(cloneTableName);
+
+ Assert.assertTrue("The original family was not found. There is something wrong. ",
+ originalTableDescriptor.hasFamily(TEST_FAM));
+ Assert.assertTrue("The original family was not found in the clone. There is something wrong. ",
+ clonedTableDescriptor.hasFamily(TEST_FAM));
+
+ Assert.assertTrue("The new family was not found. ",
+ originalTableDescriptor.hasFamily(TEST_FAM_2));
+ Assert.assertTrue("The new family was not found. ",
+ !clonedTableDescriptor.hasFamily(TEST_FAM_2));
+ }
+}
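Aside: the independence checks above all follow one pattern: clone a snapshot into a fresh
table, mutate only the original, and assert the clone is untouched. A condensed sketch,
assuming a Connection and a countRows helper (both illustrative):

  admin.cloneSnapshot(snapshotName, cloneTableName);
  try (Table original = connection.getTable(localTableName);
       Table clone = connection.getTable(cloneTableName)) {
    long cloneRowsBefore = countRows(clone);  // countRows is an assumed helper
    Put p = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis()));
    p.addColumn(TEST_FAM, Bytes.toBytes("q"), Bytes.toBytes("v"));
    original.put(p);                          // mutate only the original table
    Assert.assertEquals("clone must not see writes to the original",
        cloneRowsBefore, countRows(clone));
  }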
[15/50] [abbrv] hbase git commit: HBASE-13204 Procedure v2 - client
create/delete table sync
Posted by jm...@apache.org.
HBASE-13204 Procedure v2 - client create/delete table sync
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a6e3f46
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a6e3f46
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a6e3f46
Branch: refs/heads/hbase-11339
Commit: 6a6e3f46fde83d602e2d25cf38bff32c770004fc
Parents: b5f1f98
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 21:01:20 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:42 2015 +0100
----------------------------------------------------------------------
.../hbase/client/ConnectionImplementation.java | 6 +
.../apache/hadoop/hbase/client/HBaseAdmin.java | 608 ++++-
.../hbase/client/TestProcedureFuture.java | 186 ++
.../hbase/protobuf/generated/MasterProtos.java | 2576 +++++++++++++++---
hbase-protocol/src/main/protobuf/Master.proto | 24 +
.../org/apache/hadoop/hbase/master/HMaster.java | 12 +-
.../hadoop/hbase/master/MasterRpcServices.java | 51 +-
.../hadoop/hbase/master/MasterServices.java | 4 +-
.../master/procedure/DeleteTableProcedure.java | 1 +
.../hadoop/hbase/master/TestCatalogJanitor.java | 7 +-
.../apache/hadoop/hbase/util/TestHBaseFsck.java | 2 +-
11 files changed, 2922 insertions(+), 555 deletions(-)
----------------------------------------------------------------------
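Before the per-file diffs: the core of this change is a sync-over-async idiom. The blocking
Admin calls now submit the operation, then block on a Future with a bounded timeout and
unwrap the failure cause. A minimal sketch of that idiom as it appears in the
createTable/deleteTable bodies below (submitOperation stands in for the private *AsyncV2
methods and is illustrative):

  Future<Void> future = submitOperation();    // e.g. createTableAsyncV2(desc, splitKeys)
  try {
    future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);  // bounded wait
  } catch (InterruptedException e) {
    throw new InterruptedIOException("Interrupted while waiting on the operation");
  } catch (TimeoutException e) {
    throw new TimeoutIOException(e);          // surface as an IO-flavored timeout
  } catch (ExecutionException e) {
    // unwrap so callers keep seeing plain IOExceptions
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    }
    throw new IOException(e.getCause());
  }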
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 8442a77..bc2d51a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1598,6 +1598,12 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
}
@Override
+ public MasterProtos.GetProcedureResultResponse getProcedureResult(RpcController controller,
+ MasterProtos.GetProcedureResultRequest request) throws ServiceException {
+ return stub.getProcedureResult(controller, request);
+ }
+
+ @Override
public MasterProtos.IsMasterRunningResponse isMasterRunning(
RpcController controller, MasterProtos.IsMasterRunningRequest request)
throws ServiceException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 21a9139..7882737 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -31,6 +31,10 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
@@ -62,6 +66,7 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
@@ -89,10 +94,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
@@ -101,6 +108,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResp
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
@@ -142,6 +151,7 @@ import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -186,6 +196,7 @@ public class HBaseAdmin implements Admin {
// numRetries is for 'normal' stuff... Multiply by this factor when
// want to wait a long time.
private final int retryLongerMultiplier;
+ private final int syncWaitTimeout;
private boolean aborted;
private boolean cleanupConnectionOnClose = false; // close the connection in close()
private boolean closed = false;
@@ -242,6 +253,8 @@ public class HBaseAdmin implements Admin {
"hbase.client.retries.longer.multiplier", 10);
this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+ this.syncWaitTimeout = this.conf.getInt(
+ "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
}
@@ -541,92 +554,23 @@ public class HBaseAdmin implements Admin {
*/
@Override
public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
- throws IOException {
+ throws IOException {
+ Future<Void> future = createTableAsyncV2(desc, splitKeys);
try {
- createTableAsync(desc, splitKeys);
- } catch (SocketTimeoutException ste) {
- LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
- }
- int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
- int prevRegCount = 0;
- boolean tableWasEnabled = false;
- for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier;
- ++tries) {
- if (tableWasEnabled) {
- // Wait all table regions comes online
- final AtomicInteger actualRegCount = new AtomicInteger(0);
- MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
- @Override
- public boolean visit(Result rowResult) throws IOException {
- RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
- if (list == null) {
- LOG.warn("No serialized HRegionInfo in " + rowResult);
- return true;
- }
- HRegionLocation l = list.getRegionLocation();
- if (l == null) {
- return true;
- }
- if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
- return false;
- }
- if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
- HRegionLocation[] locations = list.getRegionLocations();
- for (HRegionLocation location : locations) {
- if (location == null) continue;
- ServerName serverName = location.getServerName();
- // Make sure that regions are assigned to server
- if (serverName != null && serverName.getHostAndPort() != null) {
- actualRegCount.incrementAndGet();
- }
- }
- return true;
- }
- };
- MetaTableAccessor.scanMetaForTableRegions(connection, visitor, desc.getTableName());
- if (actualRegCount.get() < numRegs) {
- if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
- throw new RegionOfflineException("Only " + actualRegCount.get() +
- " of " + numRegs + " regions are online; retries exhausted.");
- }
- try { // Sleep
- Thread.sleep(getPauseTime(tries));
- } catch (InterruptedException e) {
- throw new InterruptedIOException("Interrupted when opening" +
- " regions; " + actualRegCount.get() + " of " + numRegs +
- " regions processed so far");
- }
- if (actualRegCount.get() > prevRegCount) { // Making progress
- prevRegCount = actualRegCount.get();
- tries = -1;
- }
- } else {
- return;
- }
+ // TODO: how long should we wait? spin forever?
+ future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted when waiting" +
+ " for table to be enabled; meta scan was done");
+ } catch (TimeoutException e) {
+ throw new TimeoutIOException(e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
} else {
- try {
- tableWasEnabled = isTableAvailable(desc.getTableName());
- } catch (TableNotFoundException tnfe) {
- LOG.debug(
- "Table " + desc.getTableName() + " was not enabled, sleeping, still " + numRetries
- + " retries left");
- }
- if (tableWasEnabled) {
- // no we will scan meta to ensure all regions are online
- tries = -1;
- } else {
- try { // Sleep
- Thread.sleep(getPauseTime(tries));
- } catch (InterruptedException e) {
- throw new InterruptedIOException("Interrupted when waiting" +
- " for table to be enabled; meta scan was done");
- }
- }
+ throw new IOException(e.getCause());
}
}
- throw new TableNotEnabledException(
- "Retries exhausted while still waiting for table: "
- + desc.getTableName() + " to be enabled");
}
/**
@@ -646,22 +590,42 @@ public class HBaseAdmin implements Admin {
* @throws IOException
*/
@Override
- public void createTableAsync(
- final HTableDescriptor desc, final byte [][] splitKeys)
- throws IOException {
- if(desc.getTableName() == null) {
+ public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys)
+ throws IOException {
+ createTableAsyncV2(desc, splitKeys);
+ }
+
+ /**
+ * Creates a new table but does not block and wait for it to come online.
+ * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+ * It may throw ExecutionException if there was an error while executing the operation
+ * or TimeoutException in case the wait timeout was not long enough to allow the
+ * operation to complete.
+ *
+ * @param desc table descriptor for table
+ * @param splitKeys keys at which the table will be split; also used to verify
+ * that the table was created with all split keys
+ * @throws IllegalArgumentException if the table name is invalid, a split key
+ * is repeated, or a split key is the empty byte array.
+ * @throws IOException if a remote or network exception occurs
+ * @return the result of the async creation. You can use Future.get(long, TimeUnit)
+ * to wait on the operation to complete.
+ */
+ // TODO: This should be called Async but it will break binary compatibility
+ private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys)
+ throws IOException {
+ if (desc.getTableName() == null) {
throw new IllegalArgumentException("TableName cannot be null");
}
- if(splitKeys != null && splitKeys.length > 0) {
+ if (splitKeys != null && splitKeys.length > 0) {
Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
// Verify there are no duplicate split keys
- byte [] lastKey = null;
- for(byte [] splitKey : splitKeys) {
+ byte[] lastKey = null;
+ for (byte[] splitKey : splitKeys) {
if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
throw new IllegalArgumentException(
"Empty split key must not be passed in the split keys.");
}
- if(lastKey != null && Bytes.equals(splitKey, lastKey)) {
+ if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
throw new IllegalArgumentException("All split keys must be unique, " +
"found duplicate: " + Bytes.toStringBinary(splitKey) +
", " + Bytes.toStringBinary(lastKey));
@@ -670,14 +634,127 @@ public class HBaseAdmin implements Admin {
}
}
- executeCallable(new MasterCallable<Void>(getConnection()) {
+ CreateTableResponse response = executeCallable(
+ new MasterCallable<CreateTableResponse>(getConnection()) {
@Override
- public Void call(int callTimeout) throws ServiceException {
+ public CreateTableResponse call(int callTimeout) throws ServiceException {
CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
- master.createTable(null, request);
- return null;
+ return master.createTable(null, request);
}
});
+ return new CreateTableFuture(this, desc, splitKeys, response);
+ }
+
+ private static class CreateTableFuture extends ProcedureFuture<Void> {
+ private final HTableDescriptor desc;
+ private final byte[][] splitKeys;
+
+ public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc,
+ final byte[][] splitKeys, final CreateTableResponse response) {
+ super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
+ this.splitKeys = splitKeys;
+ this.desc = desc;
+ }
+
+ @Override
+ protected Void waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitForTableEnabled(deadlineTs);
+ waitForAllRegionsOnline(deadlineTs);
+ return null;
+ }
+
+ @Override
+ protected Void postOperationResult(final Void result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ LOG.info("Created " + desc.getTableName());
+ return result;
+ }
+
+ private void waitForTableEnabled(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitForState(deadlineTs, new WaitForStateCallable() {
+ @Override
+ public boolean checkState(int tries) throws IOException {
+ try {
+ if (getAdmin().isTableAvailable(desc.getTableName())) {
+ return true;
+ }
+ } catch (TableNotFoundException tnfe) {
+ LOG.debug("Table "+ desc.getTableName() +" was not enabled, sleeping. tries="+ tries);
+ }
+ return false;
+ }
+
+ @Override
+ public void throwInterruptedException() throws InterruptedIOException {
+ throw new InterruptedIOException("Interrupted when waiting for table " +
+ desc.getTableName() + " to be enabled");
+ }
+
+ @Override
+ public void throwTimeoutException(long elapsedTime) throws TimeoutException {
+ throw new TimeoutException("Table " + desc.getTableName() +
+ " not enabled after " + elapsedTime + "msec");
+ }
+ });
+ }
+
+ private void waitForAllRegionsOnline(final long deadlineTs)
+ throws IOException, TimeoutException {
+ final AtomicInteger actualRegCount = new AtomicInteger(0);
+ final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+ @Override
+ public boolean visit(Result rowResult) throws IOException {
+ RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
+ if (list == null) {
+ LOG.warn("No serialized HRegionInfo in " + rowResult);
+ return true;
+ }
+ HRegionLocation l = list.getRegionLocation();
+ if (l == null) {
+ return true;
+ }
+ if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
+ return false;
+ }
+ if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
+ HRegionLocation[] locations = list.getRegionLocations();
+ for (HRegionLocation location : locations) {
+ if (location == null) continue;
+ ServerName serverName = location.getServerName();
+ // Make sure that regions are assigned to server
+ if (serverName != null && serverName.getHostAndPort() != null) {
+ actualRegCount.incrementAndGet();
+ }
+ }
+ return true;
+ }
+ };
+
+ int tries = 0;
+ int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
+ while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
+ actualRegCount.set(0);
+ MetaTableAccessor.scanMetaForTableRegions(
+ getAdmin().getConnection(), visitor, desc.getTableName());
+ if (actualRegCount.get() == numRegs) {
+ // all the regions are online
+ return;
+ }
+
+ try {
+ Thread.sleep(getAdmin().getPauseTime(tries++));
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted when opening" +
+ " regions; " + actualRegCount.get() + " of " + numRegs +
+ " regions processed so far");
+ }
+ }
+ throw new TimeoutException("Only " + actualRegCount.get() +
+ " of " + numRegs + " regions are online; retries exhausted.");
+ }
}
public void deleteTable(final String tableName) throws IOException {
@@ -697,48 +774,93 @@ public class HBaseAdmin implements Admin {
*/
@Override
public void deleteTable(final TableName tableName) throws IOException {
- boolean tableExists = true;
+ Future<Void> future = deleteTableAsyncV2(tableName);
+ try {
+ future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
+ } catch (TimeoutException e) {
+ throw new TimeoutIOException(e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
+ } else {
+ throw new IOException(e.getCause());
+ }
+ }
+ }
- executeCallable(new MasterCallable<Void>(getConnection()) {
+ /**
+ * Deletes the table but does not block and wait for it be completely removed.
+ * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+ * It may throw ExecutionException if there was an error while executing the operation
+ * or TimeoutException in case the wait timeout was not long enough to allow the
+ * operation to complete.
+ *
+ * @param tableName name of table to delete
+ * @throws IOException if a remote or network exception occurs
+ * @return the result of the async delete. You can use Future.get(long, TimeUnit)
+ * to wait on the operation to complete.
+ */
+ // TODO: This should be called Async but it will break binary compatibility
+ private Future<Void> deleteTableAsyncV2(final TableName tableName) throws IOException {
+ DeleteTableResponse response = executeCallable(
+ new MasterCallable<DeleteTableResponse>(getConnection()) {
@Override
- public Void call(int callTimeout) throws ServiceException {
+ public DeleteTableResponse call(int callTimeout) throws ServiceException {
DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
- master.deleteTable(null,req);
- return null;
+ return master.deleteTable(null, req);
}
});
+ return new DeleteTableFuture(this, tableName, response);
+ }
- int failures = 0;
- for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
- try {
- tableExists = tableExists(tableName);
- if (!tableExists)
- break;
- } catch (IOException ex) {
- failures++;
- if(failures >= numRetries - 1) { // no more tries left
- if (ex instanceof RemoteException) {
- throw ((RemoteException) ex).unwrapRemoteException();
- } else {
- throw ex;
- }
- }
- }
- try {
- Thread.sleep(getPauseTime(tries));
- } catch (InterruptedException e) {
- throw new InterruptedIOException("Interrupted when waiting" +
- " for table to be deleted");
- }
+ private static class DeleteTableFuture extends ProcedureFuture<Void> {
+ private final TableName tableName;
+
+ public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
+ final DeleteTableResponse response) {
+ super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
+ this.tableName = tableName;
+ }
+
+ @Override
+ protected Void waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitTableNotFound(deadlineTs);
+ return null;
+ }
+
+ @Override
+ protected Void postOperationResult(final Void result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ // Delete cached information to prevent clients from using old locations
+ getAdmin().getConnection().clearRegionCache(tableName);
+ LOG.info("Deleted " + tableName);
+ return result;
}
- if (tableExists) {
- throw new IOException("Retries exhausted, it took too long to wait"+
- " for the table " + tableName + " to be deleted.");
+ private void waitTableNotFound(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitForState(deadlineTs, new WaitForStateCallable() {
+ @Override
+ public boolean checkState(int tries) throws IOException {
+ return !getAdmin().tableExists(tableName);
+ }
+
+ @Override
+ public void throwInterruptedException() throws InterruptedIOException {
+ throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
+ }
+
+ @Override
+ public void throwTimeoutException(long elapsedTime) throws TimeoutException {
+ throw new TimeoutException("Table " + tableName + " not yet deleted after " +
+ elapsedTime + "msec");
+ }
+ });
}
- // Delete cached information to prevent clients from using old locations
- this.connection.clearRegionCache(tableName);
- LOG.info("Deleted " + tableName);
}
/**
@@ -3834,4 +3956,236 @@ public class HBaseAdmin implements Admin {
}
});
}
+
+ /**
+ * Future that waits on a procedure result.
+ * Returned by the async version of the Admin calls,
+ * and used internally by the sync calls to wait on the result of the procedure.
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ protected static class ProcedureFuture<V> implements Future<V> {
+ private ExecutionException exception = null;
+ private boolean procResultFound = false;
+ private boolean done = false;
+ private V result = null;
+
+ private final HBaseAdmin admin;
+ private final Long procId;
+
+ public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
+ this.admin = admin;
+ this.procId = procId;
+ }
+
+ @Override
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean isCancelled() {
+ // TODO: Abort not implemented yet
+ return false;
+ }
+
+ @Override
+ public V get() throws InterruptedException, ExecutionException {
+ // TODO: should we ever spin forever?
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public V get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (!done) {
+ long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
+ try {
+ try {
+ // if the master support procedures, try to wait the result
+ if (procId != null) {
+ result = waitProcedureResult(procId, deadlineTs);
+ }
+ // if we don't have a proc result, try the compatibility wait
+ if (!procResultFound) {
+ result = waitOperationResult(deadlineTs);
+ }
+ result = postOperationResult(result, deadlineTs);
+ done = true;
+ } catch (IOException e) {
+ result = postOperationFailure(e, deadlineTs);
+ done = true;
+ }
+ } catch (IOException e) {
+ exception = new ExecutionException(e);
+ done = true;
+ }
+ }
+ if (exception != null) {
+ throw exception;
+ }
+ return result;
+ }
+
+ @Override
+ public boolean isDone() {
+ return done;
+ }
+
+ protected HBaseAdmin getAdmin() {
+ return admin;
+ }
+
+ private V waitProcedureResult(long procId, long deadlineTs)
+ throws IOException, TimeoutException, InterruptedException {
+ GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
+ .setProcId(procId)
+ .build();
+
+ int tries = 0;
+ IOException serviceEx = null;
+ while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
+ GetProcedureResultResponse response = null;
+ try {
+ // Try to fetch the result
+ response = getProcedureResult(request);
+ } catch (IOException e) {
+ serviceEx = unwrapException(e);
+
+ // the master may be down
+ LOG.warn("failed to get the procedure result procId=" + procId, serviceEx);
+
+ // Not much to do, if we have a DoNotRetryIOException
+ if (serviceEx instanceof DoNotRetryIOException) {
+ // TODO: looks like there is no way to unwrap this exception and get the proper
+ // UnsupportedOperationException aside from looking at the message.
+ // anyway, if we fail here we just failover to the compatibility side
+ // and that is always a valid solution.
+ LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
+ procResultFound = false;
+ return null;
+ }
+ }
+
+ // If the procedure is no longer running, we should have a result
+ if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
+ procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
+ return convertResult(response);
+ }
+
+ try {
+ Thread.sleep(getAdmin().getPauseTime(tries++));
+ } catch (InterruptedException e) {
+ throw new InterruptedException(
+ "Interrupted while waiting for the result of proc " + procId);
+ }
+ }
+ if (serviceEx != null) {
+ throw serviceEx;
+ } else {
+ throw new TimeoutException("The procedure " + procId + " is still running");
+ }
+ }
+
+ private static IOException unwrapException(IOException e) {
+ if (e instanceof RemoteException) {
+ return ((RemoteException)e).unwrapRemoteException();
+ }
+ return e;
+ }
+
+ protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
+ throws IOException {
+ return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
+ admin.getConnection()) {
+ @Override
+ public GetProcedureResultResponse call(int callTimeout) throws ServiceException {
+ return master.getProcedureResult(null, request);
+ }
+ });
+ }
+
+ /**
+ * Convert the procedure result response to a specified type.
+ * @param response the procedure result object to parse
+ * @return the result data of the procedure.
+ */
+ protected V convertResult(final GetProcedureResultResponse response) throws IOException {
+ if (response.hasException()) {
+ throw ForeignExceptionUtil.toIOException(response.getException());
+ }
+ return null;
+ }
+
+ /**
+ * Fallback implementation in case the procedure is not supported by the server.
+ * It should try to wait until the operation is completed.
+ * @param deadlineTs the timestamp after which this method should throw a TimeoutException
+ * @return the result data of the operation
+ */
+ protected V waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ return null;
+ }
+
+ /**
+ * Called after the operation is completed and the result fetched.
+ * This hook allows extra steps to be performed once the procedure completes,
+ * and transformations to be applied to the result that will be returned by get().
+ * @param result the result of the procedure
+ * @param deadlineTs the timestamp after which this method should throw a TimeoutException
+ * @return the result of the procedure, which may be the same as the passed one
+ */
+ protected V postOperationResult(final V result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ return result;
+ }
+
+ /**
+ * Called after the operation is terminated with a failure.
+ * This hook allows extra steps to be performed once the procedure terminates,
+ * and transformations to be applied to the result that will be returned by get().
+ * The default implementation rethrows the exception.
+ * @param exception the exception got from fetching the result
+ * @param deadlineTs the timestamp after which this method should throw a TimeoutException
+ * @return the result of the procedure, which may be the same as the passed one
+ */
+ protected V postOperationFailure(final IOException exception, final long deadlineTs)
+ throws IOException, TimeoutException {
+ throw exception;
+ }
+
+ protected interface WaitForStateCallable {
+ boolean checkState(int tries) throws IOException;
+ void throwInterruptedException() throws InterruptedIOException;
+ void throwTimeoutException(long elapsed) throws TimeoutException;
+ }
+
+ protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
+ throws IOException, TimeoutException {
+ int tries = 0;
+ IOException serverEx = null;
+ long startTime = EnvironmentEdgeManager.currentTime();
+ while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
+ serverEx = null;
+ try {
+ if (callable.checkState(tries)) {
+ return;
+ }
+ } catch (IOException e) {
+ serverEx = e;
+ }
+ try {
+ Thread.sleep(getAdmin().getPauseTime(tries++));
+ } catch (InterruptedException e) {
+ callable.throwInterruptedException();
+ }
+ }
+ if (serverEx != null) {
+ throw unwrapException(serverEx);
+ } else {
+ callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
+ }
+ }
+ }
}
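ProcedureFuture is meant to be subclassed per operation: when the master supports proc-v2,
the procId path polls getProcedureResult(); otherwise waitOperationResult() supplies the
compatibility fallback. A hedged sketch of a hypothetical subclass (TruncateTableFuture is
illustrative only, not part of this commit):

  private static class TruncateTableFuture extends ProcedureFuture<Void> {
    private final TableName tableName;

    public TruncateTableFuture(HBaseAdmin admin, TableName tableName, Long procId) {
      super(admin, procId);                   // null procId => compatibility path only
      this.tableName = tableName;
    }

    @Override
    protected Void waitOperationResult(final long deadlineTs)
        throws IOException, TimeoutException {
      // Fallback for masters without proc-v2: poll cluster state until done.
      waitForState(deadlineTs, new WaitForStateCallable() {
        @Override
        public boolean checkState(int tries) throws IOException {
          return getAdmin().isTableAvailable(tableName);
        }
        @Override
        public void throwInterruptedException() throws InterruptedIOException {
          throw new InterruptedIOException("Interrupted waiting on truncate of " + tableName);
        }
        @Override
        public void throwTimeoutException(long elapsed) throws TimeoutException {
          throw new TimeoutException(tableName + " not truncated after " + elapsed + "msec");
        }
      });
      return null;
    }
  }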
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java
new file mode 100644
index 0000000..da3ffe9
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java
@@ -0,0 +1,186 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({ClientTests.class, SmallTests.class})
+public class TestProcedureFuture {
+ private static class TestFuture extends HBaseAdmin.ProcedureFuture<Void> {
+ private boolean postOperationResultCalled = false;
+ private boolean waitOperationResultCalled = false;
+ private boolean getProcedureResultCalled = false;
+ private boolean convertResultCalled = false;
+
+ public TestFuture(final HBaseAdmin admin, final Long procId) {
+ super(admin, procId);
+ }
+
+ public boolean wasPostOperationResultCalled() {
+ return postOperationResultCalled;
+ }
+
+ public boolean wasWaitOperationResultCalled() {
+ return waitOperationResultCalled;
+ }
+
+ public boolean wasGetProcedureResultCalled() {
+ return getProcedureResultCalled;
+ }
+
+ public boolean wasConvertResultCalled() {
+ return convertResultCalled;
+ }
+
+ @Override
+ protected GetProcedureResultResponse getProcedureResult(
+ final GetProcedureResultRequest request) throws IOException {
+ getProcedureResultCalled = true;
+ return GetProcedureResultResponse.newBuilder()
+ .setState(GetProcedureResultResponse.State.FINISHED)
+ .build();
+ }
+
+ @Override
+ protected Void convertResult(final GetProcedureResultResponse response) throws IOException {
+ convertResultCalled = true;
+ return null;
+ }
+
+ @Override
+ protected Void waitOperationResult(final long deadlineTs)
+ throws IOException, TimeoutException {
+ waitOperationResultCalled = true;
+ return null;
+ }
+
+ @Override
+ protected Void postOperationResult(final Void result, final long deadlineTs)
+ throws IOException, TimeoutException {
+ postOperationResultCalled = true;
+ return result;
+ }
+ }
+
+ /**
+ * When the master returns a result with a procId,
+ * the waitOperationResult() call is skipped,
+ * since the procedure result is fetched directly.
+ */
+ @Test(timeout=60000)
+ public void testWithProcId() throws Exception {
+ HBaseAdmin admin = Mockito.mock(HBaseAdmin.class);
+ TestFuture f = new TestFuture(admin, 100L);
+ f.get(1, TimeUnit.MINUTES);
+
+ assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled());
+ assertTrue("expected convertResult() to be called", f.wasConvertResultCalled());
+ assertFalse("unexpected waitOperationResult() called", f.wasWaitOperationResultCalled());
+ assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled());
+ }
+
+ /**
+ * Verify that the spin loop for the procedure running works.
+ */
+ @Test(timeout=60000)
+ public void testWithProcIdAndSpinning() throws Exception {
+ final AtomicInteger spinCount = new AtomicInteger(0);
+ HBaseAdmin admin = Mockito.mock(HBaseAdmin.class);
+ TestFuture f = new TestFuture(admin, 100L) {
+ @Override
+ protected GetProcedureResultResponse getProcedureResult(
+ final GetProcedureResultRequest request) throws IOException {
+ boolean done = spinCount.incrementAndGet() >= 10;
+ return GetProcedureResultResponse.newBuilder()
+ .setState(done ? GetProcedureResultResponse.State.FINISHED :
+ GetProcedureResultResponse.State.RUNNING)
+ .build();
+ }
+ };
+ f.get(1, TimeUnit.MINUTES);
+
+ assertEquals(10, spinCount.get());
+ assertTrue("expected convertResult() to be called", f.wasConvertResultCalled());
+ assertFalse("unexpected waitOperationResult() called", f.wasWaitOperationResultCalled());
+ assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled());
+ }
+
+ /**
+ * When the master returns a result without a procId,
+ * the getProcedureResult() call is skipped.
+ */
+ @Test(timeout=60000)
+ public void testWithoutProcId() throws Exception {
+ HBaseAdmin admin = Mockito.mock(HBaseAdmin.class);
+ TestFuture f = new TestFuture(admin, null);
+ f.get(1, TimeUnit.MINUTES);
+
+ assertFalse("unexpected getProcedureResult() called", f.wasGetProcedureResultCalled());
+ assertFalse("unexpected convertResult() called", f.wasConvertResultCalled());
+ assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled());
+ assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled());
+ }
+
+ /**
+ * When a new client with procedure support asks an old master without proc-support
+ * for the procedure result, it gets a DoNotRetryIOException (wrapping an
+ * UnsupportedOperationException). The future should trap that and fall back to
+ * waitOperationResult().
+ *
+ * This happens when the operation call lands on a "new master" but, while we are waiting
+ * for the operation to complete, we fail over to an "old master".
+ */
+ @Test(timeout=60000)
+ public void testOnServerWithNoProcedureSupport() throws Exception {
+ HBaseAdmin admin = Mockito.mock(HBaseAdmin.class);
+ TestFuture f = new TestFuture(admin, 100L) {
+ @Override
+ protected GetProcedureResultResponse getProcedureResult(
+ final GetProcedureResultRequest request) throws IOException {
+ super.getProcedureResult(request);
+ throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult"));
+ }
+ };
+ f.get(1, TimeUnit.MINUTES);
+
+ assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled());
+ assertFalse("unexpected convertResult() called", f.wasConvertResultCalled());
+ assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled());
+ assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled());
+ }
+}
\ No newline at end of file
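For orientation, the contract that the tests above assert can be condensed into one
sketch. This is illustrative only, not the actual ProcedureFuture implementation; it
assumes a result type V and the same overridable hooks that TestFuture exercises
(getProcedureResult(), convertResult(), waitOperationResult(), postOperationResult()).

    // Sketch of the polling/fallback contract asserted by the tests above.
    private V waitForResult(final Long procId, final long deadlineTs)
        throws IOException, TimeoutException {
      if (procId == null) {
        // Old-style response without a procId: skip getProcedureResult() entirely.
        return postOperationResult(waitOperationResult(deadlineTs), deadlineTs);
      }
      try {
        GetProcedureResultResponse response;
        do {
          // Spin while the procedure is still RUNNING (see testWithProcIdAndSpinning).
          response = getProcedureResult(
              GetProcedureResultRequest.newBuilder().setProcId(procId).build());
        } while (response.getState() != GetProcedureResultResponse.State.FINISHED);
        return postOperationResult(convertResult(response), deadlineTs);
      } catch (DoNotRetryIOException e) {
        // Old master without procedure support: fall back to the legacy wait.
        return postOperationResult(waitOperationResult(deadlineTs), deadlineTs);
      }
    }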
[49/50] [abbrv] hbase git commit: Merge branch 'apache/master' (4/16/15) into hbase-11339
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index fcc93db,91c406c..17c1ee3
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@@ -44,16 -42,14 +43,19 @@@ import org.apache.hadoop.hbase.exceptio
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+ import org.apache.hadoop.hbase.procedure2.Procedure;
+ import org.apache.hadoop.hbase.procedure2.ProcedureResult;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.*;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@@ -1311,108 -1352,11 +1358,116 @@@ public class MasterRpcServices extends
return response.build();
}
+ /**
+ * Compact a region on the master.
+ *
+ * @param controller the RPC controller
+ * @param request the request
+ * @throws ServiceException
+ */
+ @Override
+ @QosPriority(priority=HConstants.ADMIN_QOS)
+ public CompactRegionResponse compactRegion(final RpcController controller,
+ final CompactRegionRequest request) throws ServiceException {
+ try {
+ master.checkInitialized();
+ byte[] regionName = request.getRegion().getValue().toByteArray();
+ TableName tableName = HRegionInfo.getTable(regionName);
+ // if the region is a mob region, do the mob file compaction.
+ if (MobUtils.isMobRegionName(tableName, regionName)) {
+ return compactMob(request, tableName);
+ } else {
+ return super.compactRegion(controller, request);
+ }
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
+
+ @Override
+ @QosPriority(priority=HConstants.ADMIN_QOS)
+ public GetRegionInfoResponse getRegionInfo(final RpcController controller,
+ final GetRegionInfoRequest request) throws ServiceException {
+ try {
+ master.checkInitialized();
+ byte[] regionName = request.getRegion().getValue().toByteArray();
+ TableName tableName = HRegionInfo.getTable(regionName);
+ if (MobUtils.isMobRegionName(tableName, regionName)) {
+ // return a dummy region info that contains the compaction state.
+ HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName);
+ GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
+ builder.setRegionInfo(HRegionInfo.convert(mobRegionInfo));
+ if (request.hasCompactionState() && request.getCompactionState()) {
+ builder.setCompactionState(master.getMobCompactionState(tableName));
+ }
+ return builder.build();
+ } else {
+ return super.getRegionInfo(controller, request);
+ }
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
+
+ /**
+ * Compacts the mob files in the current table.
+ * @param request the request.
+ * @param tableName the current table name.
+ * @return The response of the mob file compaction.
+ * @throws IOException
+ */
+ private CompactRegionResponse compactMob(final CompactRegionRequest request,
+ TableName tableName) throws IOException {
+ if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
+ throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
+ }
+ boolean isForceAllFiles = false;
+ List<HColumnDescriptor> compactedColumns = new ArrayList<HColumnDescriptor>();
+ HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
+ byte[] family = null;
+ if (request.hasFamily()) {
+ family = request.getFamily().toByteArray();
+ for (HColumnDescriptor hcd : hcds) {
+ if (Bytes.equals(family, hcd.getName())) {
+ if (!hcd.isMobEnabled()) {
+ LOG.error("Column family " + hcd.getName() + " is not a mob column family");
+ throw new DoNotRetryIOException("Column family " + hcd.getName()
- + " is not a mob column family");
++ + " is not a mob column family");
+ }
+ compactedColumns.add(hcd);
+ }
+ }
+ } else {
+ for (HColumnDescriptor hcd : hcds) {
+ if (hcd.isMobEnabled()) {
+ compactedColumns.add(hcd);
+ }
+ }
+ }
+ if (compactedColumns.isEmpty()) {
+ LOG.error("No mob column families are assigned in the mob file compaction");
+ throw new DoNotRetryIOException(
- "No mob column families are assigned in the mob file compaction");
++ "No mob column families are assigned in the mob file compaction");
+ }
+ if (request.hasMajor() && request.getMajor()) {
+ isForceAllFiles = true;
+ }
+ String familyLogMsg = (family != null) ? Bytes.toString(family) : "";
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("User-triggered mob file compaction requested for table: "
- + tableName.getNameAsString() + " for column family: " + familyLogMsg);
++ + tableName.getNameAsString() + " for column family: " + familyLogMsg);
+ }
+ master.mobFileCompactThread.requestMobFileCompaction(master.getConfiguration(),
- master.getFileSystem(), tableName, compactedColumns,
- master.getTableLockManager(), isForceAllFiles);
++ master.getFileSystem(), tableName, compactedColumns,
++ master.getTableLockManager(), isForceAllFiles);
+ return CompactRegionResponse.newBuilder().build();
+ }
++
+ @Override
+ public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller,
+ IsBalancerEnabledRequest request) throws ServiceException {
+ IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder();
+ response.setEnabled(master.isBalancerOn());
+ return response.build();
+ }
}
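Assuming a standard client, the mob routing above can be exercised end to end with the
plain Admin API; the sketch below uses a hypothetical table name and relies on the
MobUtils.getMobRegionInfo() helper referenced in the diff to build the mob region name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.mob.MobUtils;

    public class TriggerMobCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Hypothetical table with a mob-enabled column family.
          TableName tableName = TableName.valueOf("exampleMobTable");
          byte[] mobRegionName = MobUtils.getMobRegionInfo(tableName).getRegionName();
          // MasterRpcServices.compactRegion() recognizes this name via
          // MobUtils.isMobRegionName() and routes the call to compactMob();
          // requesting a major compaction sets isForceAllFiles = true there.
          admin.majorCompactRegion(mobRegionName);
        }
      }
    }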
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index 0664a55,d729cfa..cbff5dd
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@@ -27,9 -27,15 +27,10 @@@ import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+ import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 0000000,2582a1e..7809e55
mode 000000,100644..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@@ -1,0 -1,422 +1,450 @@@
+ /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ package org.apache.hadoop.hbase.master.procedure;
+
+ import java.io.InputStream;
+ import java.io.IOException;
+ import java.io.OutputStream;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.List;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
++import org.apache.hadoop.hbase.*;
+ import org.apache.hadoop.hbase.classification.InterfaceAudience;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.HRegionInfo;
+ import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+ import org.apache.hadoop.hbase.client.ClusterConnection;
+ import org.apache.hadoop.hbase.client.Delete;
+ import org.apache.hadoop.hbase.client.Result;
+ import org.apache.hadoop.hbase.client.ResultScanner;
+ import org.apache.hadoop.hbase.client.Scan;
+ import org.apache.hadoop.hbase.client.Table;
+ import org.apache.hadoop.hbase.exceptions.HBaseException;
++import org.apache.hadoop.hbase.mob.MobConstants;
++import org.apache.hadoop.hbase.mob.MobUtils;
+ import org.apache.hadoop.hbase.regionserver.HRegion;
+ import org.apache.hadoop.hbase.master.AssignmentManager;
+ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+ import org.apache.hadoop.hbase.master.MasterFileSystem;
+ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+ import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+ import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+ import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+ import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+ import org.apache.hadoop.hbase.util.FSUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+
+ @InterfaceAudience.Private
+ public class DeleteTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, DeleteTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DeleteTableProcedure.class);
+
+ private List<HRegionInfo> regions;
+ private UserGroupInformation user;
+ private TableName tableName;
+
+ // used for compatibility with old clients
+ private final ProcedurePrepareLatch syncLatch;
+
+ public DeleteTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ syncLatch = null;
+ }
+
+ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ this(env, tableName, null);
+ }
+
+ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName,
+ final ProcedurePrepareLatch syncLatch) throws IOException {
+ this.tableName = tableName;
+ this.user = env.getRequestUser().getUGI();
+
+ // used for compatibility with clients without procedures
+ // they need a synchronous TableNotFoundException, TableNotDisabledException, ...
+ this.syncLatch = syncLatch;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case DELETE_TABLE_PRE_OPERATION:
+ // Verify if we can delete the table
+ boolean deletable = prepareDelete(env);
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ if (!deletable) {
+ assert isFailed() : "the delete should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+
+ // TODO: Move out... in the acquireLock()
+ LOG.debug("waiting for '" + getTableName() + "' regions in transition");
+ regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
+ ProcedureSyncWait.waitRegionInTransition(env, regions);
+
+ // Call coprocessors
+ preDelete(env);
+
+ setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
+ break;
+ case DELETE_TABLE_REMOVE_FROM_META:
+ LOG.debug("delete '" + getTableName() + "' regions from META");
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
+ setNextState(DeleteTableState.DELETE_TABLE_CLEAR_FS_LAYOUT);
+ break;
+ case DELETE_TABLE_CLEAR_FS_LAYOUT:
+ LOG.debug("delete '" + getTableName() + "' from filesystem");
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+ setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE);
+ regions = null;
+ break;
+ case DELETE_TABLE_UPDATE_DESC_CACHE:
+ LOG.debug("delete '" + getTableName() + "' descriptor");
+ DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+ setNextState(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS);
+ break;
+ case DELETE_TABLE_UNASSIGN_REGIONS:
+ LOG.debug("delete '" + getTableName() + "' assignment state");
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ setNextState(DeleteTableState.DELETE_TABLE_POST_OPERATION);
+ break;
+ case DELETE_TABLE_POST_OPERATION:
+ postDelete(env);
+ LOG.debug("delete '" + getTableName() + "' completed");
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (HBaseException|IOException e) {
+ LOG.warn("Retriable error trying to delete table=" + getTableName() + " state=" + state, e);
+ } catch (InterruptedException e) {
+ // if the interrupt is real, the executor will be stopped.
+ LOG.warn("Interrupted trying to delete table=" + getTableName() + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final DeleteTableState state) {
+ if (state == DeleteTableState.DELETE_TABLE_PRE_OPERATION) {
+ // nothing to rollback, pre-delete is just table-state checks.
+ // We can fail if the table does not exist or is not disabled.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ return;
+ }
+
+ // The delete doesn't have a rollback. The execution will succeed, at some point.
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+
+ @Override
+ protected DeleteTableState getState(final int stateId) {
+ return DeleteTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final DeleteTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected DeleteTableState getInitialState() {
+ return DeleteTableState.DELETE_TABLE_PRE_OPERATION;
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.DELETE;
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ // TODO: We may be able to abort if the procedure is not started yet.
+ return false;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "delete table");
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DeleteTableStateData.Builder state =
+ MasterProcedureProtos.DeleteTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName));
+ if (regions != null) {
+ for (HRegionInfo hri: regions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.DeleteTableStateData state =
+ MasterProcedureProtos.DeleteTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ tableName = ProtobufUtil.toTableName(state.getTableName());
+ if (state.getRegionInfoCount() == 0) {
+ regions = null;
+ } else {
+ regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ regions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ private boolean prepareDelete(final MasterProcedureEnv env) throws IOException {
+ try {
+ env.getMasterServices().checkTableModifiable(tableName);
+ } catch (TableNotFoundException|TableNotDisabledException e) {
+ setFailure("master-delete-table", e);
+ return false;
+ }
+ return true;
+ }
+
+ private boolean preDelete(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = this.tableName;
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.preDeleteTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ return true;
+ }
+
+ private void postDelete(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ deleteTableStates(env, tableName);
+
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = this.tableName;
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.postDeleteTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ }
+
+ protected static void deleteFromFs(final MasterProcedureEnv env,
+ final TableName tableName, final List<HRegionInfo> regions,
+ final boolean archive) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ final FileSystem fs = mfs.getFileSystem();
+ final Path tempdir = mfs.getTempDir();
+
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
+
+ if (fs.exists(tableDir)) {
+ // Ensure temp exists
+ if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ // Ensure parent exists
+ if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ // Move the table in /hbase/.tmp
+ if (!fs.rename(tableDir, tempTableDir)) {
+ if (fs.exists(tempTableDir)) {
+ // TODO
+ // what's in this dir? something old? probably something manual from the user...
+ // let's get rid of this stuff...
+ FileStatus[] files = fs.listStatus(tempTableDir);
+ if (files != null && files.length > 0) {
+ for (int i = 0; i < files.length; ++i) {
+ if (!files[i].isDir()) continue;
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
+ }
+ }
+ fs.delete(tempTableDir, true);
+ }
+ throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
+ }
+ }
+
+ // Archive regions from FS (temp directory)
+ if (archive) {
+ for (HRegionInfo hri : regions) {
+ LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
+ tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+ }
+ LOG.debug("Table '" + tableName + "' archived!");
+ }
+
++ // Archive the mob data if there is a mob-enabled column
++ HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
++ HColumnDescriptor[] hcds = htd.getColumnFamilies();
++ boolean hasMob = false;
++ for (HColumnDescriptor hcd : hcds) {
++ if (hcd.isMobEnabled()) {
++ hasMob = true;
++ break;
++ }
++ }
++ Path mobTableDir = null;
++ if (hasMob) {
++ // Archive mob data
++ mobTableDir = FSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME),
++ tableName);
++ Path regionDir =
++ new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
++ if (fs.exists(regionDir)) {
++ HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir);
++ }
++ }
++
+ // Delete table directory from FS (temp directory)
+ if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
+ throw new IOException("Couldn't delete " + tempTableDir);
+ }
++
++ // Delete the table directory where the mob files are saved
++ if (hasMob && mobTableDir != null && fs.exists(mobTableDir)) {
++ if (!fs.delete(mobTableDir, true)) {
++ LOG.error("Couldn't delete " + mobTableDir);
++ }
++ }
+ }
+
+ /**
+ * There may be items for this table still up in hbase:meta in the case where the
+ * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta
+ * that have to do with this table. See HBASE-12980.
+ * @throws IOException
+ */
+ private static void cleanAnyRemainingRows(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ ClusterConnection connection = env.getMasterServices().getConnection();
+ Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName);
+ try (Table metaTable =
+ connection.getTable(TableName.META_TABLE_NAME)) {
+ List<Delete> deletes = new ArrayList<Delete>();
+ try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
+ for (Result result : resScanner) {
+ deletes.add(new Delete(result.getRow()));
+ }
+ }
+ if (!deletes.isEmpty()) {
+ LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName +
+ " from " + TableName.META_TABLE_NAME);
+ metaTable.delete(deletes);
+ }
+ }
+ }
+
+ protected static void deleteFromMeta(final MasterProcedureEnv env,
+ final TableName tableName, List<HRegionInfo> regions) throws IOException {
+ MetaTableAccessor.deleteRegions(env.getMasterServices().getConnection(), regions);
+
+ // Clean any remaining rows for this table.
+ cleanAnyRemainingRows(env, tableName);
+ }
+
+ protected static void deleteAssignmentState(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ AssignmentManager am = env.getMasterServices().getAssignmentManager();
+
+ // Clean up regions of the table in RegionStates.
+ LOG.debug("Removing '" + tableName + "' from region states.");
+ am.getRegionStates().tableDeleted(tableName);
+
+ // If an entry for this table exists in table states, remove it.
+ LOG.debug("Marking '" + tableName + "' as deleted.");
+ am.getTableStateManager().setDeletedTable(tableName);
+ }
+
+ protected static void deleteTableDescriptorCache(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ LOG.debug("Removing '" + tableName + "' descriptor.");
+ env.getMasterServices().getTableDescriptors().remove(tableName);
+ }
+
+ protected static void deleteTableStates(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ getMasterQuotaManager(env).removeTableFromNamespaceQuota(tableName);
+ }
+
+ private static MasterQuotaManager getMasterQuotaManager(final MasterProcedureEnv env)
+ throws IOException {
+ return ProcedureSyncWait.waitFor(env, "quota manager to be available",
+ new ProcedureSyncWait.Predicate<MasterQuotaManager>() {
+ @Override
+ public MasterQuotaManager evaluate() throws IOException {
+ return env.getMasterServices().getMasterQuotaManager();
+ }
+ });
+ }
+ }
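For orientation, the happy path of the state machine above runs PRE_OPERATION,
REMOVE_FROM_META, CLEAR_FS_LAYOUT, UPDATE_DESC_CACHE, UNASSIGN_REGIONS and
POST_OPERATION in that order. The sketch below shows roughly how the master side
would submit it; procExec stands for a ProcedureExecutor<MasterProcedureEnv> and is
an assumption, not code from this patch.

    // Sketch: submit the procedure with the compatibility latch, so clients
    // without procedure support still get a synchronous TableNotFoundException
    // or TableNotDisabledException out of DELETE_TABLE_PRE_OPERATION.
    ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
    long procId = procExec.submitProcedure(
        new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch));
    latch.await();  // released once prepareDelete() has run (see executeFromState)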
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java
index c2abc7c,0000000..d54dca4
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java
@@@ -1,308 -1,0 +1,304 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.client.Scan;
- import org.apache.hadoop.hbase.regionserver.HMobStore;
- import org.apache.hadoop.hbase.regionserver.HStore;
- import org.apache.hadoop.hbase.regionserver.InternalScanner;
- import org.apache.hadoop.hbase.regionserver.MobCompactionStoreScanner;
- import org.apache.hadoop.hbase.regionserver.ScanType;
- import org.apache.hadoop.hbase.regionserver.Store;
- import org.apache.hadoop.hbase.regionserver.StoreFile;
++import org.apache.hadoop.hbase.regionserver.*;
+import org.apache.hadoop.hbase.regionserver.StoreFile.Writer;
- import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
+import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Compact passed set of files in the mob-enabled column family.
+ */
+@InterfaceAudience.Private
+public class DefaultMobCompactor extends DefaultCompactor {
+
+ private static final Log LOG = LogFactory.getLog(DefaultMobCompactor.class);
+ private long mobSizeThreshold;
+ private HMobStore mobStore;
+ public DefaultMobCompactor(Configuration conf, Store store) {
+ super(conf, store);
+ // The mob cells reside in the mob-enabled column family which is held by HMobStore.
+ // During the compaction, the compactor reads the cells from the mob files and
+ // may create new mob files. All of these operations are included in HMobStore,
+ // so we need to cast the Store to HMobStore.
+ if (!(store instanceof HMobStore)) {
+ throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
+ }
+ mobStore = (HMobStore) store;
+ mobSizeThreshold = store.getFamily().getMobThreshold();
+ }
+
+ /**
+ * Creates a writer for a new file in a temporary directory.
+ * @param fd The file details.
+ * @param smallestReadPoint The smallest mvcc readPoint across all the scanners in this region.
+ * @return Writer for a new StoreFile in the tmp dir.
+ * @throws IOException
+ */
+ @Override
+ protected Writer createTmpWriter(FileDetails fd, long smallestReadPoint) throws IOException {
+ // always create this writer with tags because of possible new cells with tags.
+ StoreFile.Writer writer = store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression,
+ true, fd.maxMVCCReadpoint >= smallestReadPoint, true);
+ return writer;
+ }
+
+ @Override
+ protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners,
+ ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
+ Scan scan = new Scan();
+ scan.setMaxVersions(store.getFamily().getMaxVersions());
+ if (scanType == ScanType.COMPACT_DROP_DELETES) {
+ scanType = ScanType.COMPACT_RETAIN_DELETES;
+ return new MobCompactionStoreScanner(store, store.getScanInfo(), scan, scanners,
+ scanType, smallestReadPoint, earliestPutTs, true);
+ } else {
+ return new MobCompactionStoreScanner(store, store.getScanInfo(), scan, scanners,
+ scanType, smallestReadPoint, earliestPutTs, false);
+ }
+ }
+
+ // TODO refactor to take advantage of the throughput controller.
+
+ /**
+ * Performs compaction on a column family with the mob flag enabled.
+ * This is for when the mob threshold size has changed or if the mob
+ * column family mode has been toggled via an alter table statement.
+ * Compacts the files by the following rules.
+ * 1. If the cell has a mob reference tag, the cell's value is the path of the mob file.
+ * <ol>
+ * <li>
+ * If the value size of a cell is larger than the threshold, the cell is regarded as a mob;
+ * directly copy the cell (with its mob tag) into the new store file.
+ * </li>
+ * <li>
+ * Otherwise, retrieve the mob cell from the mob file and write a copy of the cell into
+ * the new store file.
+ * </li>
+ * </ol>
+ * 2. If the cell doesn't have a reference tag.
+ * <ol>
+ * <li>
+ * If the value size of a cell is larger than the threshold, this cell is regarded as a mob,
+ * write this cell to a mob file, and write the path of this mob file to the store file.
+ * </li>
+ * <li>
+ * Otherwise, directly write this cell into the store file.
+ * </li>
+ * </ol>
+ * In the mob compaction, the {@link MobCompactionStoreScanner} is used as a scanner
+ * which could output the normal cells and delete markers together when required.
+ * After the major compaction on the normal hfiles, we have a guarantee that we have purged all
+ * deleted or old version mob refs, and the delete markers are written to a del file with the
+ * suffix _del. Because of this, it is safe to use the del file in the mob compaction.
+ * The mob compaction doesn't take place in the normal hfiles, it occurs directly in the
+ * mob files. When the small mob files are merged into bigger ones, the del file is added into
+ * the scanner to filter the deleted cells.
+ * @param fd File details
+ * @param scanner Where to read from.
+ * @param writer Where to write to.
+ * @param smallestReadPoint Smallest read point.
+ * @param cleanSeqId When true, remove the seqId (formerly mvcc) value if it is <= smallestReadPoint
+ * @param major Is a major compaction.
+ * @return Whether compaction ended; false if it was interrupted for any reason.
+ */
+ @Override
+ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer,
+ long smallestReadPoint, boolean cleanSeqId,
+ CompactionThroughputController throughputController, boolean major) throws IOException {
+ if (!(scanner instanceof MobCompactionStoreScanner)) {
+ throw new IllegalArgumentException(
+ "The scanner should be an instance of MobCompactionStoreScanner");
+ }
+ MobCompactionStoreScanner compactionScanner = (MobCompactionStoreScanner) scanner;
+ int bytesWritten = 0;
+ // Since scanner.next() can return 'false' but still be delivering data,
+ // we have to use a do/while loop.
+ List<Cell> cells = new ArrayList<Cell>();
+ // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME
+ int closeCheckInterval = HStore.getCloseCheckInterval();
+ boolean hasMore;
+ Path path = MobUtils.getMobFamilyPath(conf, store.getTableName(), store.getColumnFamilyName());
+ byte[] fileName = null;
+ StoreFile.Writer mobFileWriter = null;
+ StoreFile.Writer delFileWriter = null;
+ long mobCells = 0;
+ long deleteMarkersCount = 0;
+ Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName()
+ .getName());
+ long mobCompactedIntoMobCellsCount = 0;
+ long mobCompactedFromMobCellsCount = 0;
+ long mobCompactedIntoMobCellsSize = 0;
+ long mobCompactedFromMobCellsSize = 0;
+ try {
+ try {
+ // If the mob file writer could not be created, directly write the cell to the store file.
+ mobFileWriter = mobStore.createWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount,
+ store.getFamily().getCompression(), store.getRegionInfo().getStartKey());
+ fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
+ } catch (IOException e) {
+ LOG.error(
+ "Fail to create mob writer, "
+ + "we will continue the compaction by writing MOB cells directly in store files",
+ e);
+ }
+ delFileWriter = mobStore.createDelFileWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount,
+ store.getFamily().getCompression(), store.getRegionInfo().getStartKey());
++ ScannerContext scannerContext =
++ ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
++
+ do {
- hasMore = compactionScanner.next(cells, compactionKVMax);
++ hasMore = compactionScanner.next(cells, scannerContext);
+ // output to writer:
+ for (Cell c : cells) {
+ if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) {
+ CellUtil.setSequenceId(c, 0);
+ }
+ if (compactionScanner.isOutputDeleteMarkers() && CellUtil.isDelete(c)) {
+ delFileWriter.append(c);
+ deleteMarkersCount++;
+ } else if (mobFileWriter == null || c.getTypeByte() != KeyValue.Type.Put.getCode()) {
+ // If the mob file writer is null or the kv type is not put, directly write the cell
+ // to the store file.
+ writer.append(c);
+ } else if (MobUtils.isMobReferenceCell(c)) {
+ if (MobUtils.hasValidMobRefCellValue(c)) {
+ int size = MobUtils.getMobValueLength(c);
+ if (size > mobSizeThreshold) {
+ // If the value size is larger than the threshold, it's regarded as a mob. Since
+ // its value is already in the mob file, directly write this cell to the store file
+ writer.append(c);
+ } else {
+ // If the value is not larger than the threshold, it's not regarded as a mob. Retrieve
+ // the mob cell from the mob file, and write it back to the store file.
+ Cell mobCell = mobStore.resolve(c, false);
+ if (mobCell.getValueLength() != 0) {
+ // put the mob data back to the store file
- // KeyValue mobKv = KeyValueUtil.ensureKeyValue(cell);
+ CellUtil.setSequenceId(mobCell, c.getSequenceId());
+ writer.append(mobCell);
+ mobCompactedFromMobCellsCount++;
+ mobCompactedFromMobCellsSize += mobCell.getValueLength();
+ } else {
+ // If the value of the mob cell is empty, there might be issues when retrieving,
+ // directly write the cell to the store file, and leave it to be handled by the
+ // next compaction.
+ writer.append(c);
+ }
+ }
+ } else {
+ LOG.warn("The value format of the KeyValue " + c
+ + " is wrong, its length is less than " + Bytes.SIZEOF_INT);
+ writer.append(c);
+ }
+ } else if (c.getValueLength() <= mobSizeThreshold) {
+ // If the value size of a cell is not larger than the threshold, directly write it to
+ // the store file.
+ writer.append(c);
+ } else {
+ // If the value size of a cell is larger than the threshold, it's regarded as a mob,
+ // write this cell to a mob file, and write the path to the store file.
+ mobCells++;
+ // append the original keyValue in the mob file.
+ mobFileWriter.append(c);
+ KeyValue reference = MobUtils.createMobRefKeyValue(c, fileName, tableNameTag);
+ // write the cell whose value is the path of a mob file to the store file.
+ writer.append(reference);
+ mobCompactedIntoMobCellsCount++;
+ mobCompactedIntoMobCellsSize += c.getValueLength();
+ }
+ ++progress.currentCompactedKVs;
+
+ // check periodically to see if a system stop is requested
+ if (closeCheckInterval > 0) {
+ bytesWritten += KeyValueUtil.length(c);
+ if (bytesWritten > closeCheckInterval) {
+ bytesWritten = 0;
+ if (!store.areWritesEnabled()) {
+ progress.cancel();
+ return false;
+ }
+ }
+ }
+ }
+ cells.clear();
+ } while (hasMore);
+ } finally {
+ if (mobFileWriter != null) {
+ mobFileWriter.appendMetadata(fd.maxSeqId, major, mobCells);
+ mobFileWriter.close();
+ }
+ if (delFileWriter != null) {
+ delFileWriter.appendMetadata(fd.maxSeqId, major, deleteMarkersCount);
+ delFileWriter.close();
+ }
+ }
+ if (mobFileWriter != null) {
+ if (mobCells > 0) {
+ // If the mob file is not empty, commit it.
+ mobStore.commitFile(mobFileWriter.getPath(), path);
+ } else {
+ try {
+ // If the mob file is empty, delete it instead of committing.
+ store.getFileSystem().delete(mobFileWriter.getPath(), true);
+ } catch (IOException e) {
+ LOG.error("Fail to delete the temp mob file", e);
+ }
+ }
+ }
+ if (delFileWriter != null) {
+ if (deleteMarkersCount > 0) {
+ // If the del file is not empty, commit it.
+ // If the commit fails, the compaction is performed again.
+ mobStore.commitFile(delFileWriter.getPath(), path);
+ } else {
+ try {
+ // If the del file is empty, delete it instead of committing.
+ store.getFileSystem().delete(delFileWriter.getPath(), true);
+ } catch (IOException e) {
+ LOG.error("Fail to delete the temp del file", e);
+ }
+ }
+ }
+ mobStore.updateMobCompactedFromMobCellsCount(mobCompactedFromMobCellsCount);
+ mobStore.updateMobCompactedIntoMobCellsCount(mobCompactedIntoMobCellsCount);
+ mobStore.updateMobCompactedFromMobCellsSize(mobCompactedFromMobCellsSize);
+ mobStore.updateMobCompactedIntoMobCellsSize(mobCompactedIntoMobCellsSize);
+ progress.complete();
+ return true;
+ }
+}
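The per-cell rules in the Javadoc of performCompaction() reduce to one decision tree.
The sketch below is a distillation of the loop body above, with counters, the
null-mob-writer fallback and error handling omitted; writer, delFileWriter,
mobFileWriter, fileName, tableNameTag and mobSizeThreshold are the names used in the
method.

    // Distilled per-cell decision from performCompaction() above (sketch only).
    if (compactionScanner.isOutputDeleteMarkers() && CellUtil.isDelete(c)) {
      delFileWriter.append(c);                      // delete marker: goes to the _del file
    } else if (MobUtils.isMobReferenceCell(c)) {
      if (MobUtils.getMobValueLength(c) > mobSizeThreshold) {
        writer.append(c);                           // still a mob: keep the reference cell
      } else {
        writer.append(mobStore.resolve(c, false));  // below threshold now: inline the value
      }
    } else if (c.getValueLength() > mobSizeThreshold) {
      mobFileWriter.append(c);                      // new mob: raw cell into the mob file
      writer.append(MobUtils.createMobRefKeyValue(c, fileName, tableNameTag));
    } else {
      writer.append(c);                             // small cell: straight to the store file
    }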
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index 00b3421,0000000..44387f5
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@@ -1,222 -1,0 +1,222 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
- import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
- import org.apache.hadoop.hbase.regionserver.HMobStore;
- import org.apache.hadoop.hbase.regionserver.InternalScanner;
- import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
- import org.apache.hadoop.hbase.regionserver.Store;
- import org.apache.hadoop.hbase.regionserver.StoreFile;
++import org.apache.hadoop.hbase.regionserver.*;
+import org.apache.hadoop.hbase.util.Bytes;
++import org.apache.hadoop.util.StringUtils;
+
+/**
+ * An implementation of the StoreFlusher. It extends DefaultStoreFlusher.
+ * If the store is not a mob store, the flusher flushes the MemStore the same way as
+ * DefaultStoreFlusher does.
+ * If the store is a mob store, the flusher flushes the MemStore into two places:
+ * the store files of HBase and the mob files.
+ * <ol>
+ * <li>Cells that are not PUT type or have the delete mark will be directly flushed to HBase.</li>
+ * <li>If the size of a cell value is larger than a threshold, it'll be flushed
+ * to a mob file, another cell with the path of this file will be flushed to HBase.</li>
+ * <li>If the size of a cell value is smaller than or equal to the threshold, it'll be flushed to
+ * HBase directly.</li>
+ * </ol>
+ *
+ */
+@InterfaceAudience.Private
+public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
+
+ private static final Log LOG = LogFactory.getLog(DefaultMobStoreFlusher.class);
+ private final Object flushLock = new Object();
+ private long mobCellValueSizeThreshold = 0;
+ private Path targetPath;
+ private HMobStore mobStore;
+
+ public DefaultMobStoreFlusher(Configuration conf, Store store) throws IOException {
+ super(conf, store);
+ mobCellValueSizeThreshold = store.getFamily().getMobThreshold();
+ this.targetPath = MobUtils.getMobFamilyPath(conf, store.getTableName(),
+ store.getColumnFamilyName());
+ if (!this.store.getFileSystem().exists(targetPath)) {
+ this.store.getFileSystem().mkdirs(targetPath);
+ }
+ this.mobStore = (HMobStore) store;
+ }
+
+ /**
+ * Flushes the snapshot of the MemStore.
+ * If this store is not a mob store, flush the cells in the snapshot to store files of HBase.
+ * If the store is a mob one, the flusher flushes the MemStore into two places.
+ * One is the store files of HBase, the other is the mob files.
+ * <ol>
+ * <li>Cells that are not PUT type or have the delete mark will be directly flushed to
+ * HBase.</li>
+ * <li>If the size of a cell value is larger than a threshold, it'll be
+ * flushed to a mob file, another cell with the path of this file will be flushed to HBase.</li>
+ * <li>If the size of a cell value is smaller than or equal to the threshold, it'll be flushed to
+ * HBase directly.</li>
+ * </ol>
+ */
+ @Override
+ public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
+ MonitoredTask status) throws IOException {
+ ArrayList<Path> result = new ArrayList<Path>();
+ int cellsCount = snapshot.getCellsCount();
+ if (cellsCount == 0) return result; // don't flush if there are no entries
+
+ // Use a store scanner to find which rows to flush.
+ long smallestReadPoint = store.getSmallestReadPoint();
+ InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
+ if (scanner == null) {
+ return result; // NULL scanner returned from coprocessor hooks means skip normal processing
+ }
+ StoreFile.Writer writer;
+ try {
+ // TODO: We can fail in the below block before we complete adding this flush to
+ // list of store files. Add cleanup of anything put on filesystem if we fail.
+ synchronized (flushLock) {
+ status.setStatus("Flushing " + store + ": creating writer");
+ // Write the map out to the disk
+ writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompression(),
+ false, true, true);
+ writer.setTimeRangeTracker(snapshot.getTimeRangeTracker());
+ try {
+ // It's a mob store, flush the cells in a mob way. This is how flushing
+ // differs between a normal store and a mob store.
+ performMobFlush(snapshot, cacheFlushId, scanner, writer, status);
+ } finally {
+ finalizeWriter(writer, cacheFlushId, status);
+ }
+ }
+ } finally {
+ scanner.close();
+ }
+ LOG.info("Flushed, sequenceid=" + cacheFlushId + ", memsize="
- + snapshot.getSize() + ", hasBloomFilter=" + writer.hasGeneralBloom()
- + ", into tmp file " + writer.getPath());
++ + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getSize(), "", 1) +
++ ", hasBloomFilter=" + writer.hasGeneralBloom() +
++ ", into tmp file " + writer.getPath());
+ result.add(writer.getPath());
+ return result;
+ }
+
+ /**
+ * Flushes the cells in the mob store.
+ * <ol>In the mob store, cells with PUT type may or may not have mob tags.
+ * <li>If a cell does not have a mob tag, where it is flushed depends on the value
+ * length. If the length is larger than the threshold, the cell is flushed to a
+ * mob file and a reference to that mob file is flushed to a store file in HBase.
+ * Otherwise, the cell is flushed to a store file in HBase directly.</li>
+ * <li>If a cell has a mob tag, its value is a mob file name; flush it directly
+ * to a store file in HBase.</li>
+ * </ol>
+ * @param snapshot Memstore snapshot.
+ * @param cacheFlushId Log cache flush sequence number.
+ * @param scanner The scanner of memstore snapshot.
+ * @param writer The store file writer.
+ * @param status Task that represents the flush operation and may be updated with status.
+ * @throws IOException
+ */
+ protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId,
+ InternalScanner scanner, StoreFile.Writer writer, MonitoredTask status) throws IOException {
+ StoreFile.Writer mobFileWriter = null;
+ int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX,
+ HConstants.COMPACTION_KV_MAX_DEFAULT);
+ long mobCount = 0;
+ long mobSize = 0;
+ long time = snapshot.getTimeRangeTracker().getMaximumTimestamp();
+ mobFileWriter = mobStore.createWriterInTmp(new Date(time), snapshot.getCellsCount(),
+ store.getFamily().getCompression(), store.getRegionInfo().getStartKey());
+ // the target path is {tableName}/.mob/{cfName}/mobFiles
+ // the relative path is mobFiles
+ byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
+ try {
+ Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName()
+ .getName());
+ List<Cell> cells = new ArrayList<Cell>();
+ boolean hasMore;
++ ScannerContext scannerContext =
++ ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
++
+ do {
- hasMore = scanner.next(cells, compactionKVMax);
++ hasMore = scanner.next(cells, scannerContext);
+ if (!cells.isEmpty()) {
+ for (Cell c : cells) {
+ // If we know that this KV is going to be included always, then let us
+ // set its memstoreTS to 0. This will help us save space when writing to
+ // disk.
+ KeyValue kv = KeyValueUtil.ensureKeyValue(c);
+ if (kv.getValueLength() <= mobCellValueSizeThreshold || MobUtils.isMobReferenceCell(kv)
+ || kv.getTypeByte() != KeyValue.Type.Put.getCode()) {
+ writer.append(kv);
+ } else {
+ // append the original keyValue in the mob file.
+ mobFileWriter.append(kv);
+ mobSize += kv.getValueLength();
+ mobCount++;
+
+ // append the tags to the KeyValue.
+ // The key is the same; the value is the filename of the mob file
+ KeyValue reference = MobUtils.createMobRefKeyValue(kv, fileName, tableNameTag);
+ writer.append(reference);
+ }
+ }
+ cells.clear();
+ }
+ } while (hasMore);
+ } finally {
+ status.setStatus("Flushing mob file " + store + ": appending metadata");
+ mobFileWriter.appendMetadata(cacheFlushId, false, mobCount);
+ status.setStatus("Flushing mob file " + store + ": closing flushed file");
+ mobFileWriter.close();
+ }
+
+ if (mobCount > 0) {
+ // commit the mob file from temp folder to target folder.
+ // If the mob file is committed successfully but the store file is not,
+ // the committed mob file will be handled by the sweep tool as an unused
+ // file.
+ mobStore.commitFile(mobFileWriter.getPath(), targetPath);
+ mobStore.updateMobFlushCount();
+ mobStore.updateMobFlushedCellsCount(mobCount);
+ mobStore.updateMobFlushedCellsSize(mobSize);
+ } else {
+ try {
+ // If the mob file is empty, delete it instead of committing.
+ store.getFileSystem().delete(mobFileWriter.getPath(), true);
+ } catch (IOException e) {
+ LOG.error("Fail to delete the temp mob file", e);
+ }
+ }
+ }
+}
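The flush rules above boil down to a single predicate per cell. The helper below is a
sketch of that predicate, not part of the patch; it assumes the same KeyValue and
MobUtils APIs used in performMobFlush().

    // A cell is diverted to the mob file only if it is a Put, is not already a
    // mob reference, and its value exceeds the column family's mob threshold;
    // everything else goes straight to the HBase store file.
    static boolean flushToMobFile(KeyValue kv, long mobCellValueSizeThreshold) {
      return kv.getTypeByte() == KeyValue.Type.Put.getCode()
          && !MobUtils.isMobReferenceCell(kv)
          && kv.getValueLength() > mobCellValueSizeThreshold;
    }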
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
----------------------------------------------------------------------
diff --cc hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
index 0778ac1,0000000..718b513
mode 100644,000000..100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java
@@@ -1,646 -1,0 +1,643 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.filecompactions;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagType;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobFileName;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.filecompactions.MobFileCompactionRequest.CompactionType;
+import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartition;
+import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartitionId;
- import org.apache.hadoop.hbase.regionserver.BloomType;
- import org.apache.hadoop.hbase.regionserver.HStore;
- import org.apache.hadoop.hbase.regionserver.ScanInfo;
- import org.apache.hadoop.hbase.regionserver.ScanType;
- import org.apache.hadoop.hbase.regionserver.StoreFile;
++import org.apache.hadoop.hbase.regionserver.*;
+import org.apache.hadoop.hbase.regionserver.StoreFile.Writer;
- import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
- import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
- import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * An implementation of {@link MobFileCompactor} that compacts the mob files in partitions.
+ */
+@InterfaceAudience.Private
+public class PartitionedMobFileCompactor extends MobFileCompactor {
+
+ private static final Log LOG = LogFactory.getLog(PartitionedMobFileCompactor.class);
+ protected long mergeableSize;
+ protected int delFileMaxCount;
+ /** The number of files compacted in a batch */
+ protected int compactionBatchSize;
+ protected int compactionKVMax;
+
+ private Path tempPath;
+ private Path bulkloadPath;
+ private CacheConfig compactionCacheConfig;
+ private Tag tableNameTag;
+
+ public PartitionedMobFileCompactor(Configuration conf, FileSystem fs, TableName tableName,
+ HColumnDescriptor column, ExecutorService pool) {
+ super(conf, fs, tableName, column, pool);
+ mergeableSize = conf.getLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD,
+ MobConstants.DEFAULT_MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD);
+ delFileMaxCount = conf.getInt(MobConstants.MOB_DELFILE_MAX_COUNT,
+ MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
+ // default is 100
+ compactionBatchSize = conf.getInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE,
+ MobConstants.DEFAULT_MOB_FILE_COMPACTION_BATCH_SIZE);
+ tempPath = new Path(MobUtils.getMobHome(conf), MobConstants.TEMP_DIR_NAME);
+ bulkloadPath = new Path(tempPath, new Path(MobConstants.BULKLOAD_DIR_NAME, new Path(
+ tableName.getNamespaceAsString(), tableName.getQualifierAsString())));
+ compactionKVMax = this.conf.getInt(HConstants.COMPACTION_KV_MAX,
+ HConstants.COMPACTION_KV_MAX_DEFAULT);
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
+ compactionCacheConfig = new CacheConfig(copyOfConf);
+ tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
+ }
+
+ @Override
+ public List<Path> compact(List<FileStatus> files, boolean isForceAllFiles) throws IOException {
+ if (files == null || files.isEmpty()) {
+ LOG.info("No candidate mob files");
+ return null;
+ }
+ LOG.info("isForceAllFiles: " + isForceAllFiles);
+ // find the files to compact.
+ PartitionedMobFileCompactionRequest request = select(files, isForceAllFiles);
+ // compact the files.
+ return performCompaction(request);
+ }
+
+ /**
+ * Selects the mob/del files to be compacted.
+ * Iterates the candidates to find out all the del files and small mob files.
+ * @param candidates All the candidates.
+ * @param isForceAllFiles Whether add all mob files into the compaction.
+ * @return A compaction request.
+ * @throws IOException
+ */
+ protected PartitionedMobFileCompactionRequest select(List<FileStatus> candidates,
+ boolean isForceAllFiles) throws IOException {
+ Collection<FileStatus> allDelFiles = new ArrayList<FileStatus>();
+ Map<CompactionPartitionId, CompactionPartition> filesToCompact =
+ new HashMap<CompactionPartitionId, CompactionPartition>();
+ int selectedFileCount = 0;
+ int irrelevantFileCount = 0;
+ for (FileStatus file : candidates) {
+ if (!file.isFile()) {
+ irrelevantFileCount++;
+ continue;
+ }
+ // group the del files and small files.
+ FileStatus linkedFile = file;
+ if (HFileLink.isHFileLink(file.getPath())) {
+ HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
+ linkedFile = getLinkedFileStatus(link);
+ if (linkedFile == null) {
+          // If the linked file cannot be found, regard it as an irrelevant file.
+ irrelevantFileCount++;
+ continue;
+ }
+ }
+ if (StoreFileInfo.isDelFile(linkedFile.getPath())) {
+ allDelFiles.add(file);
+ } else if (isForceAllFiles || linkedFile.getLen() < mergeableSize) {
+ // add all files if isForceAllFiles is true,
+ // otherwise add the small files to the merge pool
+ MobFileName fileName = MobFileName.create(linkedFile.getPath().getName());
+ CompactionPartitionId id = new CompactionPartitionId(fileName.getStartKey(),
+ fileName.getDate());
+ CompactionPartition compactionPartition = filesToCompact.get(id);
+ if (compactionPartition == null) {
+ compactionPartition = new CompactionPartition(id);
+ compactionPartition.addFile(file);
+ filesToCompact.put(id, compactionPartition);
+ } else {
+ compactionPartition.addFile(file);
+ }
+ selectedFileCount++;
+ }
+ }
+ PartitionedMobFileCompactionRequest request = new PartitionedMobFileCompactionRequest(
+ filesToCompact.values(), allDelFiles);
+ if (candidates.size() == (allDelFiles.size() + selectedFileCount + irrelevantFileCount)) {
+ // all the files are selected
+ request.setCompactionType(CompactionType.ALL_FILES);
+ }
+ LOG.info("The compaction type is " + request.getCompactionType() + ", the request has "
+ + allDelFiles.size() + " del files, " + selectedFileCount + " selected files, and "
+ + irrelevantFileCount + " irrelevant files");
+ return request;
+ }
+
+ /**
+ * Performs the compaction on the selected files.
+ * <ol>
+ * <li>Compacts the del files.</li>
+ * <li>Compacts the selected small mob files and all the del files.</li>
+   * <li>If all the candidates are selected, archives the del files.</li>
+ * </ol>
+ * @param request The compaction request.
+ * @return The paths of new mob files generated in the compaction.
+ * @throws IOException
+ */
+ protected List<Path> performCompaction(PartitionedMobFileCompactionRequest request)
+ throws IOException {
+ // merge the del files
+ List<Path> delFilePaths = new ArrayList<Path>();
+ for (FileStatus delFile : request.delFiles) {
+ delFilePaths.add(delFile.getPath());
+ }
+ List<Path> newDelPaths = compactDelFiles(request, delFilePaths);
+ List<StoreFile> newDelFiles = new ArrayList<StoreFile>();
+ for (Path newDelPath : newDelPaths) {
+ StoreFile sf = new StoreFile(fs, newDelPath, conf, compactionCacheConfig, BloomType.NONE);
+ newDelFiles.add(sf);
+ }
+ LOG.info("After merging, there are " + newDelFiles.size() + " del files");
+ // compact the mob files by partitions.
+ List<Path> paths = compactMobFiles(request, newDelFiles);
+ LOG.info("After compaction, there are " + paths.size() + " mob files");
+ // archive the del files if all the mob files are selected.
+ if (request.type == CompactionType.ALL_FILES && !newDelPaths.isEmpty()) {
+ LOG.info("After a mob file compaction with all files selected, archiving the del files "
+ + newDelFiles);
+ try {
+ MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), newDelFiles);
+ } catch (IOException e) {
+ LOG.error("Failed to archive the del files " + newDelFiles, e);
+ }
+ }
+ return paths;
+ }
+
+ /**
+ * Compacts the selected small mob files and all the del files.
+ * @param request The compaction request.
+ * @param delFiles The del files.
+ * @return The paths of new mob files after compactions.
+ * @throws IOException
+ */
+ protected List<Path> compactMobFiles(final PartitionedMobFileCompactionRequest request,
+ final List<StoreFile> delFiles) throws IOException {
+ Collection<CompactionPartition> partitions = request.compactionPartitions;
+ if (partitions == null || partitions.isEmpty()) {
+ LOG.info("No partitions of mob files");
+ return Collections.emptyList();
+ }
+ List<Path> paths = new ArrayList<Path>();
- final HTable table = new HTable(conf, tableName);
++ Connection c = ConnectionFactory.createConnection(conf);
++ final Table table = c.getTable(tableName);
+ try {
+ Map<CompactionPartitionId, Future<List<Path>>> results =
+ new HashMap<CompactionPartitionId, Future<List<Path>>>();
+ // compact the mob files by partitions in parallel.
+ for (final CompactionPartition partition : partitions) {
+ results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {
+ @Override
+ public List<Path> call() throws Exception {
+ LOG.info("Compacting mob files for partition " + partition.getPartitionId());
+ return compactMobFilePartition(request, partition, delFiles, table);
+ }
+ }));
+ }
+      // wait for each partition and collect the results of the compactions.
+ boolean hasFailure = false;
+ for (Entry<CompactionPartitionId, Future<List<Path>>> result : results.entrySet()) {
+ try {
+ paths.addAll(result.getValue().get());
+ } catch (Exception e) {
+ // just log the error
+ LOG.error("Failed to compact the partition " + result.getKey(), e);
+ hasFailure = true;
+ }
+ }
+ if (hasFailure) {
+ // if any partition fails in the compaction, directly throw an exception.
+ throw new IOException("Failed to compact the partitions");
+ }
+    } finally {
+      try {
+        table.close();
+      } catch (IOException e) {
+        LOG.error("Failed to close the table", e);
+      }
++      try {
++        c.close();
++      } catch (IOException e) {
++        LOG.error("Failed to close the connection", e);
++      }
+    }
+ return paths;
+ }
+
+ /**
+ * Compacts a partition of selected small mob files and all the del files.
+ * @param request The compaction request.
+ * @param partition A compaction partition.
+ * @param delFiles The del files.
+ * @param table The current table.
+ * @return The paths of new mob files after compactions.
+ * @throws IOException
+ */
+ private List<Path> compactMobFilePartition(PartitionedMobFileCompactionRequest request,
- CompactionPartition partition, List<StoreFile> delFiles, HTable table) throws IOException {
++ CompactionPartition partition, List<StoreFile> delFiles, Table table) throws IOException {
+ List<Path> newFiles = new ArrayList<Path>();
+ List<FileStatus> files = partition.listFiles();
+ int offset = 0;
+ Path bulkloadPathOfPartition = new Path(bulkloadPath, partition.getPartitionId().toString());
+ Path bulkloadColumnPath = new Path(bulkloadPathOfPartition, column.getNameAsString());
+ while (offset < files.size()) {
+ int batch = compactionBatchSize;
+ if (files.size() - offset < compactionBatchSize) {
+ batch = files.size() - offset;
+ }
+ if (batch == 1 && delFiles.isEmpty()) {
+ // only one file left and no del files, do not compact it,
+ // and directly add it to the new files.
+ newFiles.add(files.get(offset).getPath());
+ offset++;
+ continue;
+ }
+ // clean the bulkload directory to avoid loading old files.
+ fs.delete(bulkloadPathOfPartition, true);
+ // add the selected mob files and del files into filesToCompact
+ List<StoreFile> filesToCompact = new ArrayList<StoreFile>();
+ for (int i = offset; i < batch + offset; i++) {
+ StoreFile sf = new StoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
+ BloomType.NONE);
+ filesToCompact.add(sf);
+ }
+ filesToCompact.addAll(delFiles);
+ // compact the mob files in a batch.
+ compactMobFilesInBatch(request, partition, table, filesToCompact, batch,
+ bulkloadPathOfPartition, bulkloadColumnPath, newFiles);
+ // move to the next batch.
+ offset += batch;
+ }
+    LOG.info("Compaction is finished. The number of mob files changed from " + files.size()
+      + " to " + newFiles.size());
+ return newFiles;
+ }
+
+ /**
+ * Compacts a partition of selected small mob files and all the del files in a batch.
+ * @param request The compaction request.
+ * @param partition A compaction partition.
+ * @param table The current table.
+ * @param filesToCompact The files to be compacted.
+ * @param batch The number of mob files to be compacted in a batch.
+   * @param bulkloadPathOfPartition The bulkload directory of the current partition.
+   * @param bulkloadColumnPath The directory of the column family under the bulkload
+   *          directory of the current partition.
+ * @param newFiles The paths of new mob files after compactions.
+ * @throws IOException
+ */
+ private void compactMobFilesInBatch(PartitionedMobFileCompactionRequest request,
- CompactionPartition partition, HTable table, List<StoreFile> filesToCompact, int batch,
++ CompactionPartition partition, Table table, List<StoreFile> filesToCompact, int batch,
+ Path bulkloadPathOfPartition, Path bulkloadColumnPath, List<Path> newFiles)
+ throws IOException {
+ // open scanner to the selected mob files and del files.
+ StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES);
+    // the mob files to be compacted, not including the del files.
+ List<StoreFile> mobFilesToCompact = filesToCompact.subList(0, batch);
+ // Pair(maxSeqId, cellsCount)
+ Pair<Long, Long> fileInfo = getFileInfo(mobFilesToCompact);
+ // open writers for the mob files and new ref store files.
+ Writer writer = null;
+ Writer refFileWriter = null;
+ Path filePath = null;
+ Path refFilePath = null;
+ long mobCells = 0;
+ try {
+ writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getDate(),
+ tempPath, Long.MAX_VALUE, column.getCompactionCompression(), partition.getPartitionId()
+ .getStartKey(), compactionCacheConfig);
+ filePath = writer.getPath();
+ byte[] fileName = Bytes.toBytes(filePath.getName());
+ // create a temp file and open a writer for it in the bulkloadPath
+ refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo
+ .getSecond().longValue(), compactionCacheConfig);
+ refFilePath = refFileWriter.getPath();
+ List<Cell> cells = new ArrayList<Cell>();
+ boolean hasMore = false;
++ ScannerContext scannerContext =
++ ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
+ do {
- hasMore = scanner.next(cells, compactionKVMax);
++ hasMore = scanner.next(cells, scannerContext);
+ for (Cell cell : cells) {
+          // TODO remove this after the new code is introduced.
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ // write the mob cell to the mob file.
+ writer.append(kv);
+ // write the new reference cell to the store file.
+ KeyValue reference = MobUtils.createMobRefKeyValue(kv, fileName, tableNameTag);
+ refFileWriter.append(reference);
+ mobCells++;
+ }
+ cells.clear();
+ } while (hasMore);
+ } finally {
+ // close the scanner.
+ scanner.close();
+ // append metadata to the mob file, and close the mob file writer.
+ closeMobFileWriter(writer, fileInfo.getFirst(), mobCells);
+ // append metadata and bulkload info to the ref mob file, and close the writer.
+ closeRefFileWriter(refFileWriter, fileInfo.getFirst(), request.selectionTime);
+ }
+ if (mobCells > 0) {
+ // commit mob file
+ MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
+ // bulkload the ref file
+ bulkloadRefFile(table, bulkloadPathOfPartition, filePath.getName());
+ newFiles.add(new Path(mobFamilyDir, filePath.getName()));
+    } else {
+      // the mob file is empty, delete it instead of committing.
+ deletePath(filePath);
+ // the ref file is empty, delete it instead of committing.
+ deletePath(refFilePath);
+ }
+ // archive the old mob files, do not archive the del files.
+ try {
+ MobUtils
+ .removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), mobFilesToCompact);
+ } catch (IOException e) {
+ LOG.error("Failed to archive the files " + mobFilesToCompact, e);
+ }
+ }
+
+ /**
+   * Compacts the del files in batches, which avoids opening too many files at once.
+   * @param request The compaction request.
+   * @param delFilePaths The paths of the del files to be compacted.
+ * @return The paths of new del files after merging or the original files if no merging
+ * is necessary.
+ * @throws IOException
+ */
+ protected List<Path> compactDelFiles(PartitionedMobFileCompactionRequest request,
+ List<Path> delFilePaths) throws IOException {
+ if (delFilePaths.size() <= delFileMaxCount) {
+ return delFilePaths;
+ }
+    // when there are more del files than the number that is allowed, merge them first.
+ int offset = 0;
+ List<Path> paths = new ArrayList<Path>();
+ while (offset < delFilePaths.size()) {
+ // get the batch
+ int batch = compactionBatchSize;
+ if (delFilePaths.size() - offset < compactionBatchSize) {
+ batch = delFilePaths.size() - offset;
+ }
+ List<StoreFile> batchedDelFiles = new ArrayList<StoreFile>();
+ if (batch == 1) {
+ // only one file left, do not compact it, directly add it to the new files.
+ paths.add(delFilePaths.get(offset));
+ offset++;
+ continue;
+ }
+ for (int i = offset; i < batch + offset; i++) {
+ batchedDelFiles.add(new StoreFile(fs, delFilePaths.get(i), conf, compactionCacheConfig,
+ BloomType.NONE));
+ }
+ // compact the del files in a batch.
+ paths.add(compactDelFilesInBatch(request, batchedDelFiles));
+ // move to the next batch.
+ offset += batch;
+ }
+ return compactDelFiles(request, paths);
+ }
+
+ /**
+   * Compacts the del files in a batch.
+ * @param request The compaction request.
+ * @param delFiles The del files.
+ * @return The path of new del file after merging.
+ * @throws IOException
+ */
+ private Path compactDelFilesInBatch(PartitionedMobFileCompactionRequest request,
+ List<StoreFile> delFiles) throws IOException {
+ // create a scanner for the del files.
+ StoreScanner scanner = createScanner(delFiles, ScanType.COMPACT_RETAIN_DELETES);
+ Writer writer = null;
+ Path filePath = null;
+ try {
+ writer = MobUtils.createDelFileWriter(conf, fs, column,
+ MobUtils.formatDate(new Date(request.selectionTime)), tempPath, Long.MAX_VALUE,
+ column.getCompactionCompression(), HConstants.EMPTY_START_ROW, compactionCacheConfig);
+ filePath = writer.getPath();
+ List<Cell> cells = new ArrayList<Cell>();
+ boolean hasMore = false;
++ ScannerContext scannerContext =
++ ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
+ do {
- hasMore = scanner.next(cells, compactionKVMax);
++ hasMore = scanner.next(cells, scannerContext);
+ for (Cell cell : cells) {
+          // TODO remove this after the new code is introduced.
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ writer.append(kv);
+ }
+ cells.clear();
+ } while (hasMore);
+ } finally {
+ scanner.close();
+ if (writer != null) {
+ try {
+ writer.close();
+ } catch (IOException e) {
+ LOG.error("Failed to close the writer of the file " + filePath, e);
+ }
+ }
+ }
+ // commit the new del file
+ Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
+ // archive the old del files
+ try {
+ MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
+ } catch (IOException e) {
+ LOG.error("Failed to archive the old del files " + delFiles, e);
+ }
+ return path;
+ }
+
+ /**
+ * Creates a store scanner.
+ * @param filesToCompact The files to be compacted.
+ * @param scanType The scan type.
+ * @return The store scanner.
+ * @throws IOException
+ */
+ private StoreScanner createScanner(List<StoreFile> filesToCompact, ScanType scanType)
+ throws IOException {
+    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact,
+      false, true, false, null, HConstants.LATEST_TIMESTAMP);
+ Scan scan = new Scan();
+ scan.setMaxVersions(column.getMaxVersions());
+ long ttl = HStore.determineTTLFromFamily(column);
+ ScanInfo scanInfo = new ScanInfo(column, ttl, 0, KeyValue.COMPARATOR);
+ StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L,
+ HConstants.LATEST_TIMESTAMP);
+ return scanner;
+ }
+
+ /**
+ * Bulkloads the current file.
+ * @param table The current table.
+ * @param bulkloadDirectory The path of bulkload directory.
+ * @param fileName The current file name.
+ * @throws IOException
+ */
- private void bulkloadRefFile(HTable table, Path bulkloadDirectory, String fileName)
++ private void bulkloadRefFile(Table table, Path bulkloadDirectory, String fileName)
+ throws IOException {
+ // bulkload the ref file
+ try {
+ LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
- bulkload.doBulkLoad(bulkloadDirectory, table);
++ bulkload.doBulkLoad(bulkloadDirectory, (HTable)table);
+ } catch (Exception e) {
+ // delete the committed mob file
+ deletePath(new Path(mobFamilyDir, fileName));
+ throw new IOException(e);
+ } finally {
+ // delete the bulkload files in bulkloadPath
+ deletePath(bulkloadDirectory);
+ }
+ }
+
+ /**
+ * Closes the mob file writer.
+ * @param writer The mob file writer.
+ * @param maxSeqId Maximum sequence id.
+ * @param mobCellsCount The number of mob cells.
+ * @throws IOException
+ */
+ private void closeMobFileWriter(Writer writer, long maxSeqId, long mobCellsCount)
+ throws IOException {
+ if (writer != null) {
+ writer.appendMetadata(maxSeqId, false, mobCellsCount);
+ try {
+ writer.close();
+ } catch (IOException e) {
+ LOG.error("Failed to close the writer of the file " + writer.getPath(), e);
+ }
+ }
+ }
+
+ /**
+ * Closes the ref file writer.
+ * @param writer The ref file writer.
+ * @param maxSeqId Maximum sequence id.
+ * @param bulkloadTime The timestamp at which the bulk load file is created.
+ * @throws IOException
+ */
+ private void closeRefFileWriter(Writer writer, long maxSeqId, long bulkloadTime)
+ throws IOException {
+ if (writer != null) {
+ writer.appendMetadata(maxSeqId, false);
+ writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(bulkloadTime));
+ try {
+ writer.close();
+ } catch (IOException e) {
+ LOG.error("Failed to close the writer of the ref file " + writer.getPath(), e);
+ }
+ }
+ }
+
+ /**
+ * Gets the max seqId and number of cells of the store files.
+ * @param storeFiles The store files.
+ * @return The pair of the max seqId and number of cells of the store files.
+ * @throws IOException
+ */
+ private Pair<Long, Long> getFileInfo(List<StoreFile> storeFiles) throws IOException {
+ long maxSeqId = 0;
+ long maxKeyCount = 0;
+ for (StoreFile sf : storeFiles) {
+ // the readers will be closed later after the merge.
+ maxSeqId = Math.max(maxSeqId, sf.getMaxSequenceId());
+ byte[] count = sf.createReader().loadFileInfo().get(StoreFile.MOB_CELLS_COUNT);
+ if (count != null) {
+ maxKeyCount += Bytes.toLong(count);
+ }
+ }
+ return new Pair<Long, Long>(Long.valueOf(maxSeqId), Long.valueOf(maxKeyCount));
+ }
+
+ /**
+ * Deletes a file.
+ * @param path The path of the file to be deleted.
+ */
+ private void deletePath(Path path) {
+ try {
+ if (path != null) {
+ fs.delete(path, true);
+ }
+ } catch (IOException e) {
+ LOG.error("Failed to delete the file " + path, e);
+ }
+ }
+
+ private FileStatus getLinkedFileStatus(HFileLink link) throws IOException {
+ Path[] locations = link.getLocations();
+ for (Path location : locations) {
+ FileStatus file = getFileStatus(location);
+ if (file != null) {
+ return file;
+ }
+ }
+ return null;
+ }
+
+ private FileStatus getFileStatus(Path path) throws IOException {
+ try {
+ if (path != null) {
+ FileStatus file = fs.getFileStatus(path);
+ return file;
+ }
+ } catch (FileNotFoundException e) {
+      LOG.warn("The file " + path + " cannot be found", e);
+ }
+ return null;
+ }
+}
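The heart of the compactor above is the per-partition batching loop in compactMobFilePartition(): files are consumed in slices of compactionBatchSize, and a lone trailing file is passed through untouched when there are no del files to apply. The following is a minimal, self-contained sketch of just that batching rule; the class name and the compactBatch() helper are hypothetical stand-ins for illustration, not part of the committed code.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BatchingSketch {
  // Mirrors the offset/batch arithmetic of compactMobFilePartition().
  static List<String> compactInBatches(List<String> files, int batchSize, boolean hasDelFiles) {
    List<String> newFiles = new ArrayList<>();
    int offset = 0;
    while (offset < files.size()) {
      int batch = Math.min(batchSize, files.size() - offset);
      if (batch == 1 && !hasDelFiles) {
        // A lone trailing file with no del files is not compacted; it is
        // passed straight into the result, exactly as in the code above.
        newFiles.add(files.get(offset));
        offset++;
        continue;
      }
      newFiles.add(compactBatch(files.subList(offset, offset + batch)));
      offset += batch;
    }
    return newFiles;
  }

  // Hypothetical stand-in for the real per-batch compaction.
  static String compactBatch(List<String> batch) {
    return "compacted(" + String.join(",", batch) + ")";
  }

  public static void main(String[] args) {
    // Five files with a batch size of two: two compacted batches plus one pass-through file.
    System.out.println(compactInBatches(Arrays.asList("f1", "f2", "f3", "f4", "f5"), 2, false));
  }
}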
[34/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core
framework (addendum)
Posted by jm...@apache.org.
HBASE-13202 Procedure v2 - core framework (addendum)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d75326a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d75326a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d75326a7
Branch: refs/heads/hbase-11339
Commit: d75326a7974881a41993e210b9c5b7d4b0fe5b8b
Parents: 4f15144
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Apr 15 09:39:25 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Apr 15 09:50:47 2015 +0100
----------------------------------------------------------------------
.../procedure2/ProcedureFairRunQueues.java | 1 +
.../procedure2/store/ProcedureStoreTracker.java | 12 ++++++++--
.../procedure2/store/wal/WALProcedureStore.java | 6 ++---
.../store/TestProcedureStoreTracker.java | 25 ++++++++++++++++++++
4 files changed, 39 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d75326a7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
index 03d007a..242ae86 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java
@@ -95,6 +95,7 @@ public class ProcedureFairRunQueues<TKey, TQueue extends ProcedureFairRunQueues.
public void clear() {
lock.lock();
try {
+ currentQuantum = 0;
current = null;
objMap.clear();
} finally {
http://git-wip-us.apache.org/repos/asf/hbase/blob/d75326a7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 4e4653a..a4711f1 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -195,11 +195,12 @@ public class ProcedureStoreTracker {
// Grow/Merge Helpers
// ========================================================================
public boolean canGrow(final long procId) {
- return (procId - start) < MAX_NODE_SIZE;
+ return Math.abs(procId - start) < MAX_NODE_SIZE;
}
public boolean canMerge(final BitSetNode rightNode) {
- return (start + rightNode.getEnd()) < MAX_NODE_SIZE;
+ assert start < rightNode.getEnd();
+ return (rightNode.getEnd() - start) < MAX_NODE_SIZE;
}
public void grow(final long procId) {
@@ -258,6 +259,11 @@ public class ProcedureStoreTracker {
}
}
+ @Override
+ public String toString() {
+ return "BitSetNode(" + getStart() + "-" + getEnd() + ")";
+ }
+
// ========================================================================
// Min/Max Helpers
// ========================================================================
@@ -377,6 +383,7 @@ public class ProcedureStoreTracker {
@InterfaceAudience.Private
public void setDeleted(final long procId, final boolean isDeleted) {
BitSetNode node = getOrCreateNode(procId);
+ assert node.contains(procId) : "expected procId in the node";
node.updateState(procId, isDeleted);
}
@@ -507,6 +514,7 @@ public class ProcedureStoreTracker {
}
private BitSetNode mergeNodes(BitSetNode leftNode, BitSetNode rightNode) {
+ assert leftNode.getStart() < rightNode.getStart();
leftNode.merge(rightNode);
map.remove(rightNode.getStart());
return leftNode;
http://git-wip-us.apache.org/repos/asf/hbase/blob/d75326a7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 13f7bfa..09d2f7a 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -389,7 +389,7 @@ public class WALProcedureStore implements ProcedureStore {
}
private long pushData(final ByteSlot slot) {
- assert !logs.isEmpty() : "recoverLease() must be called before inserting data";
+ assert isRunning() && !logs.isEmpty() : "recoverLease() must be called before inserting data";
long logId = -1;
lock.lock();
@@ -677,7 +677,7 @@ public class WALProcedureStore implements ProcedureStore {
try {
log.readTracker(storeTracker);
} catch (IOException e) {
- LOG.error("Unable to read tracker for " + log, e);
+ LOG.warn("Unable to read tracker for " + log + " - " + e.getMessage());
// try the next one...
storeTracker.clear();
storeTracker.setPartialFlag(true);
@@ -718,4 +718,4 @@ public class WALProcedureStore implements ProcedureStore {
}
return log;
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/d75326a7/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
index 0669549..be759dc 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.procedure2.store;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -165,4 +166,28 @@ public class TestProcedureStoreTracker {
tracker.delete(procs[5].getProcId());
assertTrue(tracker.isEmpty());
}
+
+ @Test
+ public void testRandLoad() {
+ final int NPROCEDURES = 2500;
+ final int NRUNS = 5000;
+
+ final ProcedureStoreTracker tracker = new ProcedureStoreTracker();
+
+ Random rand = new Random(1);
+ for (int i = 0; i < NRUNS; ++i) {
+ assertTrue(tracker.isEmpty());
+
+ int count = 0;
+ while (count < NPROCEDURES) {
+ long procId = rand.nextLong();
+ if (procId < 1) continue;
+
+ tracker.setDeleted(procId, i % 2 == 0);
+ count++;
+ }
+
+ tracker.clear();
+ }
+ }
}
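The canGrow() fix above is subtle: without Math.abs(), a procId far below the node's start makes the difference negative, so the old bound was always satisfied and the node could grow across an arbitrarily large range. A small sketch of the two checks side by side; the MAX_NODE_SIZE value here is an assumed placeholder for illustration, not the real constant.

public class CanGrowSketch {
  static final long MAX_NODE_SIZE = 1024; // placeholder value, assumption

  static boolean canGrowOld(long start, long procId) {
    return (procId - start) < MAX_NODE_SIZE; // negative when procId < start, so always true
  }

  static boolean canGrowFixed(long start, long procId) {
    return Math.abs(procId - start) < MAX_NODE_SIZE; // bounded in both directions
  }

  public static void main(String[] args) {
    long start = 1000000L, farBelow = 1L;
    System.out.println(canGrowOld(start, farBelow));   // true: the node would grow huge
    System.out.println(canGrowFixed(start, farBelow)); // false: correctly rejected
  }
}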
[42/50] [abbrv] hbase git commit: HBASE-12987 Pare repeated hbck
output and increase verbosity in long-running tasks.
Posted by jm...@apache.org.
HBASE-12987 Pare repeated hbck output and increase verbosity in long-running tasks.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/682a29a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/682a29a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/682a29a5
Branch: refs/heads/hbase-11339
Commit: 682a29a57f73b836859b3d3e1048fc82d64e8fe3
Parents: 14261bc
Author: Josh Elser <el...@apache.org>
Authored: Wed Apr 8 16:22:22 2015 -0400
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed Apr 15 14:35:43 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/util/FSUtils.java | 55 +++++++++++++++++++-
.../org/apache/hadoop/hbase/util/HBaseFsck.java | 38 +++++++++++---
2 files changed, 85 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/682a29a5/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 0d0912e..e86054b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -1546,6 +1547,28 @@ public abstract class FSUtils {
public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
final FileSystem fs, final Path hbaseRootDir, TableName tableName)
throws IOException {
+ return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null);
+ }
+
+ /**
+ * Runs through the HBase rootdir/tablename and creates a reverse lookup map for
+ * table StoreFile names to the full Path.
+ * <br>
+ * Example...<br>
+ * Key = 3944417774205889744 <br>
+ * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
+ *
+ * @param map map to add values. If null, this method will create and populate one to return
+ * @param fs The file system to use.
+ * @param hbaseRootDir The root directory to scan.
+ * @param tableName name of the table to scan.
+ * @param errors ErrorReporter instance or null
+ * @return Map keyed by StoreFile name with a value of the full Path.
+ * @throws IOException When scanning the directory fails.
+ */
+ public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
+ final FileSystem fs, final Path hbaseRootDir, TableName tableName, ErrorReporter errors)
+ throws IOException {
if (map == null) {
map = new HashMap<String, Path>();
}
@@ -1557,10 +1580,16 @@ public abstract class FSUtils {
PathFilter familyFilter = new FamilyDirFilter(fs);
FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
for (FileStatus regionDir : regionDirs) {
+ if (null != errors) {
+ errors.progress();
+ }
Path dd = regionDir.getPath();
// else its a region name, now look in region for families
FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
for (FileStatus familyDir : familyDirs) {
+ if (null != errors) {
+ errors.progress();
+ }
Path family = familyDir.getPath();
if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
continue;
@@ -1569,6 +1598,9 @@ public abstract class FSUtils {
// put in map
FileStatus[] familyStatus = fs.listStatus(family);
for (FileStatus sfStatus : familyStatus) {
+ if (null != errors) {
+ errors.progress();
+ }
Path sf = sfStatus.getPath();
map.put( sf.getName(), sf);
}
@@ -1589,7 +1621,6 @@ public abstract class FSUtils {
return result;
}
-
/**
* Runs through the HBase rootdir and creates a reverse lookup map for
* table StoreFile names to the full Path.
@@ -1606,6 +1637,26 @@ public abstract class FSUtils {
public static Map<String, Path> getTableStoreFilePathMap(
final FileSystem fs, final Path hbaseRootDir)
throws IOException {
+ return getTableStoreFilePathMap(fs, hbaseRootDir, null);
+ }
+
+ /**
+ * Runs through the HBase rootdir and creates a reverse lookup map for
+ * table StoreFile names to the full Path.
+ * <br>
+ * Example...<br>
+ * Key = 3944417774205889744 <br>
+ * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
+ *
+ * @param fs The file system to use.
+ * @param hbaseRootDir The root directory to scan.
+ * @param errors ErrorReporter instance or null
+ * @return Map keyed by StoreFile name with a value of the full Path.
+ * @throws IOException When scanning the directory fails.
+ */
+ public static Map<String, Path> getTableStoreFilePathMap(
+ final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors)
+ throws IOException {
Map<String, Path> map = new HashMap<String, Path>();
// if this method looks similar to 'getTableFragmentation' that is because
@@ -1614,7 +1665,7 @@ public abstract class FSUtils {
// only include the directory paths to tables
for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
getTableStoreFilePathMap(map, fs, hbaseRootDir,
- FSUtils.getTableName(tableDir));
+ FSUtils.getTableName(tableDir), errors);
}
return map;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/682a29a5/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index f8fdd96..67e3411 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -641,13 +641,17 @@ public class HBaseFsck extends Configured implements Closeable {
// load regiondirs and regioninfos from HDFS
if (shouldCheckHdfs()) {
+ LOG.info("Loading region directories from HDFS");
loadHdfsRegionDirs();
+ LOG.info("Loading region information from HDFS");
loadHdfsRegionInfos();
}
// fix the orphan tables
fixOrphanTables();
+ LOG.info("Checking and fixing region consistency");
+
// Check and fix consistency
checkAndFixConsistency();
@@ -970,7 +974,10 @@ public class HBaseFsck extends Configured implements Closeable {
Configuration conf = getConf();
Path hbaseRoot = FSUtils.getRootDir(conf);
FileSystem fs = hbaseRoot.getFileSystem(conf);
- Map<String, Path> allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot);
+ LOG.info("Computing mapping of all store files");
+ Map<String, Path> allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot, errors);
+ errors.print("");
+ LOG.info("Validating mapping using HDFS state");
for (Path path: allFiles.values()) {
boolean isReference = false;
try {
@@ -1168,6 +1175,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
loadTableInfosForTablesWithNoRegion();
+ errors.print("");
return tablesInfo;
}
@@ -1358,6 +1366,7 @@ public class HBaseFsck extends Configured implements Closeable {
*/
private void suggestFixes(
SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
+ logParallelMerge();
for (TableInfo tInfo : tablesInfo.values()) {
TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
tInfo.checkRegionChain(handler);
@@ -1431,9 +1440,23 @@ public class HBaseFsck extends Configured implements Closeable {
return true;
}
+ /**
+ * Log an appropriate message about whether or not overlapping merges are computed in parallel.
+ */
+ private void logParallelMerge() {
+ if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) {
+ LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" +
+ " false to run serially.");
+ } else {
+ LOG.info("Handling overlap merges serially. set hbasefsck.overlap.merge.parallel to" +
+ " true to run in parallel.");
+ }
+ }
+
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
boolean fixOverlaps) throws IOException {
LOG.info("Checking HBase region split map from HDFS data...");
+ logParallelMerge();
for (TableInfo tInfo : tablesInfo.values()) {
TableIntegrityErrorHandler handler;
if (fixHoles || fixOverlaps) {
@@ -1662,6 +1685,7 @@ public class HBaseFsck extends Configured implements Closeable {
LOG.warn("Could not load region dir " , e.getCause());
}
}
+ errors.print("");
}
/**
@@ -2395,6 +2419,7 @@ public class HBaseFsck extends Configured implements Closeable {
loadTableInfosForTablesWithNoRegion();
+ logParallelMerge();
for (TableInfo tInfo : tablesInfo.values()) {
TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
if (!tInfo.checkRegionChain(handler)) {
@@ -3011,15 +3036,11 @@ public class HBaseFsck extends Configured implements Closeable {
// TODO fold this into the TableIntegrityHandler
if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) {
- LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" +
- " false to run serially.");
boolean ok = handleOverlapsParallel(handler, prevKey);
if (!ok) {
return false;
}
} else {
- LOG.info("Handling overlap merges serially. set hbasefsck.overlap.merge.parallel to" +
- " true to run in parallel.");
for (Collection<HbckInfo> overlap : overlapGroups.asMap().values()) {
handler.handleOverlapGroup(overlap);
}
@@ -3745,6 +3766,8 @@ public class HBaseFsck extends Configured implements Closeable {
static class PrintingErrorReporter implements ErrorReporter {
public int errorCount = 0;
private int showProgress;
+ // How frequently calls to progress() will create output
+ private static final int progressThreshold = 100;
Set<TableInfo> errorTables = new HashSet<TableInfo>();
@@ -3859,7 +3882,7 @@ public class HBaseFsck extends Configured implements Closeable {
@Override
public synchronized void progress() {
- if (showProgress++ == 10) {
+ if (showProgress++ == progressThreshold) {
if (!summary) {
System.out.print(".");
}
@@ -3956,6 +3979,7 @@ public class HBaseFsck extends Configured implements Closeable {
// level 2: <HBASE_DIR>/<table>/*
FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
for (FileStatus regionDir : regionDirs) {
+ errors.progress();
String encodedName = regionDir.getPath().getName();
// ignore directories that aren't hexadecimal
if (!encodedName.toLowerCase().matches("[0-9a-f]+")) {
@@ -3983,6 +4007,7 @@ public class HBaseFsck extends Configured implements Closeable {
FileStatus[] subDirs = fs.listStatus(regionDir.getPath());
Path ePath = WALSplitter.getRegionDirRecoveredEditsDir(regionDir.getPath());
for (FileStatus subDir : subDirs) {
+ errors.progress();
String sdName = subDir.getPath().getName();
if (!sdName.startsWith(".") && !sdName.equals(ePath.getName())) {
he.hdfsOnlyEdits = false;
@@ -4023,6 +4048,7 @@ public class HBaseFsck extends Configured implements Closeable {
// only load entries that haven't been loaded yet.
if (hbi.getHdfsHRI() == null) {
try {
+ errors.progress();
hbck.loadHdfsRegioninfo(hbi);
} catch (IOException ioe) {
String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
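The hbck changes above thread an optional ErrorReporter through the store-file walk so long-running scans emit periodic progress, and raise the output threshold so the dots stay sparse. A minimal sketch of that throttled-progress pattern, assuming an illustrative threshold of 100 calls per printed dot (it mirrors the shape of PrintingErrorReporter.progress(), not its exact code):

public class ProgressSketch {
  private static final int PROGRESS_THRESHOLD = 100; // calls per printed dot, assumed value
  private int showProgress = 0;

  // Cheap enough to call from tight loops; output is emitted only once per threshold.
  public synchronized void progress() {
    if (showProgress++ == PROGRESS_THRESHOLD) {
      System.out.print(".");
      showProgress = 0;
    }
  }

  public static void main(String[] args) {
    ProgressSketch p = new ProgressSketch();
    for (int i = 0; i < 1000; i++) {
      p.progress(); // prints roughly one dot per hundred calls
    }
    System.out.println();
  }
}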
[16/50] [abbrv] hbase git commit: HBASE-13211 Procedure V2 - master
Enable/Disable table (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index a07516d..a9ad0e0 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -135,3 +135,33 @@ message DeleteColumnFamilyStateData {
required bytes columnfamily_name = 3;
optional TableSchema unmodified_table_schema = 4;
}
+
+enum EnableTableState {
+ ENABLE_TABLE_PREPARE = 1;
+ ENABLE_TABLE_PRE_OPERATION = 2;
+ ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3;
+ ENABLE_TABLE_MARK_REGIONS_ONLINE = 4;
+ ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5;
+ ENABLE_TABLE_POST_OPERATION = 6;
+}
+
+message EnableTableStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required bool skip_table_state_check = 3;
+}
+
+enum DisableTableState {
+ DISABLE_TABLE_PREPARE = 1;
+ DISABLE_TABLE_PRE_OPERATION = 2;
+ DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3;
+ DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4;
+ DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5;
+ DISABLE_TABLE_POST_OPERATION = 6;
+}
+
+message DisableTableStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required bool skip_table_state_check = 3;
+}
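Each enum above lists the steps a procedure persists as it advances; the procedures built on them dispatch on the current state and report whether more states remain. A rough sketch of that dispatch loop, using a simplified Flow type and driver in place of the real StateMachineProcedure API:

public class StateMachineSketch {
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }
  enum State { PREPARE, PRE_OPERATION, SET_DISABLING, MARK_REGIONS_OFFLINE, SET_DISABLED, POST_OPERATION }

  private State state = State.PREPARE;

  // One step: do the work for the current state, then choose the next one.
  Flow executeFromState(State s) {
    System.out.println("executing " + s);
    switch (s) {
      case PREPARE:              state = State.PRE_OPERATION;        return Flow.HAS_MORE_STATE;
      case PRE_OPERATION:        state = State.SET_DISABLING;        return Flow.HAS_MORE_STATE;
      case SET_DISABLING:        state = State.MARK_REGIONS_OFFLINE; return Flow.HAS_MORE_STATE;
      case MARK_REGIONS_OFFLINE: state = State.SET_DISABLED;         return Flow.HAS_MORE_STATE;
      case SET_DISABLED:         state = State.POST_OPERATION;       return Flow.HAS_MORE_STATE;
      default:                   return Flow.NO_MORE_STATE;
    }
  }

  public static void main(String[] args) {
    StateMachineSketch p = new StateMachineSketch();
    // In the real framework each transition is persisted, so a restarted
    // master can resume the procedure from the last recorded state.
    while (p.executeFromState(p.state) == Flow.HAS_MORE_STATE) {
    }
  }
}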
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 2e33095..ff28081 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -87,14 +87,14 @@ import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
-import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
@@ -1681,11 +1681,24 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preEnableTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
- this.service.submit(new EnableTableHandler(this, tableName,
- assignmentManager, tableLockManager, false).prepare());
+
+ // Execute the operation asynchronously - client will check the progress of the operation
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
+ long procId =
+ this.procedureExecutor.submitProcedure(new EnableTableProcedure(procedureExecutor
+ .getEnvironment(), tableName, false, prepareLatch));
+ // Before returning to client, we want to make sure that the table is prepared to be
+ // enabled (the table is locked and the table state is set).
+ //
+ // Note: if the procedure throws exception, we will catch it and rethrow.
+ prepareLatch.await();
+
if (cpHost != null) {
cpHost.postEnableTable(tableName);
- }
+ }
+
+ // TODO: return procId as part of client-side change
+ // return procId;
}
@Override
@@ -1695,11 +1708,25 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preDisableTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
- this.service.submit(new DisableTableHandler(this, tableName,
- assignmentManager, tableLockManager, false).prepare());
+
+ // Execute the operation asynchronously - client will check the progress of the operation
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
+ long procId =
+ this.procedureExecutor.submitProcedure(new DisableTableProcedure(procedureExecutor
+ .getEnvironment(), tableName, false, prepareLatch));
+ // Before returning to client, we want to make sure that the table is prepared to be
+    // disabled (the table is locked and the table state is set).
+ //
+ // Note: if the procedure throws exception, we will catch it and rethrow.
+ prepareLatch.await();
+
if (cpHost != null) {
cpHost.postDisableTable(tableName);
}
+
+ // TODO: return procId as part of client-side change
+ // return procId;
}
/**
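The ProcedurePrepareLatch calls above preserve the 1.0 client contract: enableTable()/disableTable() return only after the procedure's prepare phase (table lock plus state checks) has completed, while the remaining work proceeds asynchronously. A minimal sketch of that handshake using a plain CountDownLatch; the real latch additionally rethrows a prepare-phase exception to the caller.

import java.util.concurrent.CountDownLatch;

public class PrepareLatchSketch {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch prepared = new CountDownLatch(1);

    Thread procedure = new Thread(() -> {
      System.out.println("prepare: lock the table, check its state");
      prepared.countDown(); // release the caller once the prepare phase is done
      System.out.println("continue asynchronously: offline regions, flip the table state");
    });
    procedure.start();

    prepared.await(); // the caller blocks here until prepare completes
    System.out.println("client call returns; the rest of the work runs in the background");
  }
}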
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
new file mode 100644
index 0000000..2507cec
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -0,0 +1,540 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkAssigner;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.htrace.Trace;
+
+@InterfaceAudience.Private
+public class DisableTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, DisableTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DisableTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  // This is for backward compatibility with 1.0 asynchronous operations.
+ private final ProcedurePrepareLatch syncLatch;
+
+ private TableName tableName;
+ private boolean skipTableStateCheck;
+ private UserGroupInformation user;
+
+ private Boolean traceEnabled = null;
+
+ enum MarkRegionOfflineOpResult {
+ MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL,
+ BULK_ASSIGN_REGIONS_FAILED,
+ MARK_ALL_REGIONS_OFFLINE_INTERRUPTED,
+ }
+
+ public DisableTableProcedure() {
+ syncLatch = null;
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+   * @param skipTableStateCheck whether to skip the table state check
+ * @throws IOException
+ */
+ public DisableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck) throws IOException {
+ this(env, tableName, skipTableStateCheck, null);
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+   * @param skipTableStateCheck whether to skip the table state check
+   * @param syncLatch the latch to release once the prepare phase is done
+   * @throws IOException
+ */
+ public DisableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck,
+ final ProcedurePrepareLatch syncLatch) throws IOException {
+ this.tableName = tableName;
+ this.skipTableStateCheck = skipTableStateCheck;
+ this.user = env.getRequestUser().getUGI();
+
+    // Compatible with 1.0: We use a latch to make sure that this procedure implementation is
+    // compatible with 1.0 asynchronous operations. We need to lock the table and check
+    // whether the Disable operation could be performed (the table exists and is online; the
+    // table state is ENABLED). Once that is done, we can release the latch and the client can
+    // start to wait asynchronously for the operation.
+    //
+    // Note: the member syncLatch could be null if we are in a failover or recovery scenario.
+    // This is fine for backward compatibility, as a 1.0 client would not be able to peek at
+    // the procedure.
+ this.syncLatch = syncLatch;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTableState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case DISABLE_TABLE_PREPARE:
+ if (prepareDisable(env)) {
+ setNextState(DisableTableState.DISABLE_TABLE_PRE_OPERATION);
+ } else {
+ assert isFailed() : "disable should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+ break;
+ case DISABLE_TABLE_PRE_OPERATION:
+ preDisable(env, state);
+ setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLING_TABLE_STATE);
+ break;
+ case DISABLE_TABLE_SET_DISABLING_TABLE_STATE:
+ setTableStateToDisabling(env, tableName);
+ setNextState(DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE);
+ break;
+ case DISABLE_TABLE_MARK_REGIONS_OFFLINE:
+ if (markRegionsOffline(env, tableName, true) ==
+ MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
+ setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE);
+ } else {
+ LOG.trace("Retrying later to disable the missing regions");
+ }
+ break;
+ case DISABLE_TABLE_SET_DISABLED_TABLE_STATE:
+ setTableStateToDisabled(env, tableName);
+ setNextState(DisableTableState.DISABLE_TABLE_POST_OPERATION);
+ break;
+ case DISABLE_TABLE_POST_OPERATION:
+ postDisable(env, state);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ LOG.warn("Retriable error trying to disable table=" + tableName + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException {
+ if (state == DisableTableState.DISABLE_TABLE_PREPARE) {
+ // nothing to rollback, prepare-disable is just table-state checks.
+ // We can fail if the table does not exist or is not disabled.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ return;
+ }
+
+    // The disable doesn't have a rollback. The execution will succeed, at some point.
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+
+ @Override
+ protected DisableTableState getState(final int stateId) {
+ return DisableTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final DisableTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected DisableTableState getInitialState() {
+ return DisableTableState.DISABLE_TABLE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(final DisableTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("disable-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_DISABLE_TABLE.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DisableTableStateData.Builder disableTableMsg =
+ MasterProcedureProtos.DisableTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setSkipTableStateCheck(skipTableStateCheck);
+
+ disableTableMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.DisableTableStateData disableTableMsg =
+ MasterProcedureProtos.DisableTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(disableTableMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(disableTableMsg.getTableName());
+ skipTableStateCheck = disableTableMsg.getSkipTableStateCheck();
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.DISABLE;
+ }
+
+ /**
+   * Action before any real action of disabling the table. Sets the exception in the procedure
+   * instead of throwing it, to remain backward compatible with 1.0.
+   * @param env MasterProcedureEnv
+   * @return whether the table can be disabled
+   * @throws IOException
+ */
+ private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
+ boolean canTableBeDisabled = true;
+ if (tableName.equals(TableName.META_TABLE_NAME)) {
+ setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
+ canTableBeDisabled = false;
+ } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ setFailure("master-disable-table", new TableNotFoundException(tableName));
+ canTableBeDisabled = false;
+ } else if (!skipTableStateCheck) {
+ // There could be multiple client requests trying to disable or enable
+ // the table at the same time. Ensure only the first request is honored
+ // After that, no other requests can be accepted until the table reaches
+ // DISABLED or ENABLED.
+ //
+ // Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
+ // the state to DISABLING from ENABLED. The implementation was done before table lock
+ // was implemented. With table lock, there is no need to set the state here (it will
+ // set the state later on). A quick state check should be enough for us to move forward.
+ TableStateManager tsm =
+ env.getMasterServices().getAssignmentManager().getTableStateManager();
+ if (!tsm.getTableState(tableName).equals(TableState.State.ENABLED)) {
+ LOG.info("Table " + tableName + " isn't enabled; skipping disable");
+ setFailure("master-disable-table", new TableNotEnabledException(tableName));
+ canTableBeDisabled = false;
+ }
+ }
+
+    // We are done with the check. Future actions in this procedure could be done asynchronously.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+
+ return canTableBeDisabled;
+ }
+
+ /**
+ * Action before disabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ protected void preDisable(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Mark table state to Disabling
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ protected static void setTableStateToDisabling(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ // Set table disabling flag up in zk.
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ TableState.State.DISABLING);
+ }
+
+ /**
+ * Mark regions of the table offline with retries
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @param retryRequired whether to retry if the first run failed
+ * @return whether the operation fully completed or was interrupted.
+ * @throws IOException
+ */
+ protected static MarkRegionOfflineOpResult markRegionsOffline(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final Boolean retryRequired) throws IOException {
+ // Dev consideration: add a config to control the max number of retries. For now, it is hard-coded.
+ int maxTry = (retryRequired ? 10 : 1);
+ MarkRegionOfflineOpResult operationResult =
+ MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED;
+ do {
+ try {
+ operationResult = markRegionsOffline(env, tableName);
+ if (operationResult == MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
+ break;
+ }
+ maxTry--;
+ } catch (Exception e) {
+ LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e);
+ maxTry--;
+ if (maxTry > 0) {
+ continue; // we still have some retries left, try again.
+ }
+ throw e;
+ }
+ } while (maxTry > 0);
+
+ if (operationResult != MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
+ LOG.warn("Some or all regions of the Table '" + tableName + "' were still online");
+ }
+
+ return operationResult;
+ }
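The "Dev consideration" above leaves the retry limit hard-coded at 10. A hedged sketch of what a
configurable limit might look like; the key "hbase.master.table.disable.max.retries" is
hypothetical and not part of this patch:

    // Sketch only: read the retry limit from the master configuration
    // instead of hard-coding 10. The config key below is hypothetical.
    int maxTry = retryRequired
        ? env.getMasterServices().getConfiguration()
            .getInt("hbase.master.table.disable.max.retries", 10)
        : 1;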
+
+ /**
+ * Mark regions of the table offline
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @return whether the operation is fully completed or being interrupted.
+ * @throws IOException
+ */
+ private static MarkRegionOfflineOpResult markRegionsOffline(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ // Get the list of online regions that belong to this table. Regions that are
+ // already closed will not be included in this list; i.e. the returned
+ // list is not ALL regions in the table, it's all online regions according
+ // to the in-memory state on this master.
+ MarkRegionOfflineOpResult operationResult =
+ MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL;
+ final List<HRegionInfo> regions =
+ env.getMasterServices().getAssignmentManager().getRegionStates()
+ .getRegionsOfTable(tableName);
+ if (regions.size() > 0) {
+ LOG.info("Offlining " + regions.size() + " regions.");
+
+ BulkDisabler bd = new BulkDisabler(env, tableName, regions);
+ try {
+ if (!bd.bulkAssign()) {
+ operationResult = MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED;
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Disable was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ operationResult = MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_INTERRUPTED;
+ }
+ }
+ return operationResult;
+ }
+
+ /**
+ * Mark table state to Disabled
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @throws IOException
+ */
+ protected static void setTableStateToDisabled(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ // Flip the table to disabled
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ TableState.State.DISABLED);
+ LOG.info("Disabled table, " + tableName + ", is completed.");
+ }
+
+ /**
+ * Action after disabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ protected void postDisable(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case DISABLE_TABLE_PRE_OPERATION:
+ cpHost.preDisableTableHandler(tableName);
+ break;
+ case DISABLE_TABLE_POST_OPERATION:
+ cpHost.postDisableTableHandler(tableName);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+
+ /**
+ * Run bulk disable.
+ */
+ private static class BulkDisabler extends BulkAssigner {
+ private final AssignmentManager assignmentManager;
+ private final List<HRegionInfo> regions;
+ private final TableName tableName;
+ private final int waitingTimeForEvents;
+
+ public BulkDisabler(final MasterProcedureEnv env, final TableName tableName,
+ final List<HRegionInfo> regions) {
+ super(env.getMasterServices());
+ this.assignmentManager = env.getMasterServices().getAssignmentManager();
+ this.tableName = tableName;
+ this.regions = regions;
+ this.waitingTimeForEvents =
+ env.getMasterServices().getConfiguration()
+ .getInt("hbase.master.event.waiting.time", 1000);
+ }
+
+ @Override
+ protected void populatePool(ExecutorService pool) {
+ RegionStates regionStates = assignmentManager.getRegionStates();
+ for (final HRegionInfo region : regions) {
+ if (regionStates.isRegionInTransition(region)
+ && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
+ continue;
+ }
+ pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() {
+ @Override
+ public void run() {
+ assignmentManager.unassign(region);
+ }
+ }));
+ }
+ }
+
+ @Override
+ protected boolean waitUntilDone(long timeout) throws InterruptedException {
+ long startTime = EnvironmentEdgeManager.currentTime();
+ long remaining = timeout;
+ List<HRegionInfo> regions = null;
+ long lastLogTime = startTime;
+ while (!server.isStopped() && remaining > 0) {
+ Thread.sleep(waitingTimeForEvents);
+ regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
+ long now = EnvironmentEdgeManager.currentTime();
+ // Don't log more than once every ten seconds. It's obnoxious. And only log table regions
+ // if we are waiting a while for them to go down...
+ if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) {
+ lastLogTime = now;
+ LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
+ }
+ if (regions.isEmpty()) break;
+ remaining = timeout - (now - startTime);
+ }
+ return regions != null && regions.isEmpty();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
new file mode 100644
index 0000000..aefb0b1
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -0,0 +1,582 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkAssigner;
+import org.apache.hadoop.hbase.master.GeneralBulkAssigner;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class EnableTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, EnableTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(EnableTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ // This is for backward compatibility with 1.0 asynchronous operations.
+ private final ProcedurePrepareLatch syncLatch;
+
+ private TableName tableName;
+ private boolean skipTableStateCheck;
+ private UserGroupInformation user;
+
+ private Boolean traceEnabled = null;
+
+ public EnableTableProcedure() {
+ syncLatch = null;
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+ * @param skipTableStateCheck whether to check table state
+ * @throws IOException
+ */
+ public EnableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck) throws IOException {
+ this(env, tableName, skipTableStateCheck, null);
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+ * @param skipTableStateCheck whether to check table state
+ * @param syncLatch latch to release once the prepare step completes (may be null)
+ * @throws IOException
+ */
+ public EnableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck,
+ final ProcedurePrepareLatch syncLatch) throws IOException {
+ this.tableName = tableName;
+ this.skipTableStateCheck = skipTableStateCheck;
+ this.user = env.getRequestUser().getUGI();
+
+ // Compatible with 1.0: We use the latch to make sure that this procedure implementation is
+ // compatible with 1.0 asynchronous operations. We need to lock the table and check
+ // whether the Enable operation could be performed (table exists and is offline; table state
+ // is DISABLED). Once that is done, we are good to release the latch and the client can
+ // start to asynchronously wait for the operation to complete.
+ //
+ // Note: the member syncLatch could be null if we are in a failover or recovery scenario.
+ // This is ok for backward compatibility, as a 1.0 client would not be able to peek at the
+ // procedure.
+ this.syncLatch = syncLatch;
+ }
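For the 1.0 compatibility path described in the comment above, the latch gives a synchronous
client a point to block on until the prepare step has run. A minimal usage sketch, mirroring the
pattern exercised by the tests later in this patch:

    // Sketch: a 1.0-style synchronous submit that waits for the prepare step.
    ProcedurePrepareLatch latch = new ProcedurePrepareLatch.CompatibilityLatch();
    long procId = procExec.submitProcedure(
        new EnableTableProcedure(procExec.getEnvironment(), tableName, false, latch));
    // Throws (e.g. TableNotDisabledException) if the prepare check failed.
    latch.await();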
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case ENABLE_TABLE_PREPARE:
+ if (prepareEnable(env)) {
+ setNextState(EnableTableState.ENABLE_TABLE_PRE_OPERATION);
+ } else {
+ assert isFailed() : "enable should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+ break;
+ case ENABLE_TABLE_PRE_OPERATION:
+ preEnable(env, state);
+ setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLING_TABLE_STATE);
+ break;
+ case ENABLE_TABLE_SET_ENABLING_TABLE_STATE:
+ setTableStateToEnabling(env, tableName);
+ setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
+ break;
+ case ENABLE_TABLE_MARK_REGIONS_ONLINE:
+ markRegionsOnline(env, tableName, true);
+ setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLED_TABLE_STATE);
+ break;
+ case ENABLE_TABLE_SET_ENABLED_TABLE_STATE:
+ setTableStateToEnabled(env, tableName);
+ setNextState(EnableTableState.ENABLE_TABLE_POST_OPERATION);
+ break;
+ case ENABLE_TABLE_POST_OPERATION:
+ postEnable(env, state);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ LOG.error("Error trying to enable table=" + tableName + " state=" + state, e);
+ setFailure("master-enable-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case ENABLE_TABLE_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo (eg. DisableTableProcedure.preDisable())?
+ break;
+ case ENABLE_TABLE_SET_ENABLED_TABLE_STATE:
+ DisableTableProcedure.setTableStateToDisabling(env, tableName);
+ break;
+ case ENABLE_TABLE_MARK_REGIONS_ONLINE:
+ markRegionsOfflineDuringRecovery(env);
+ break;
+ case ENABLE_TABLE_SET_ENABLING_TABLE_STATE:
+ DisableTableProcedure.setTableStateToDisabled(env, tableName);
+ break;
+ case ENABLE_TABLE_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo (eg. DisableTableProcedure.postDisable())?
+ break;
+ case ENABLE_TABLE_PREPARE:
+ // Nothing to undo for this state.
+ // We do need to count down the latch so that callers don't get stuck.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ break;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed enable table rollback attempt step=" + state + " table=" + tableName, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected EnableTableState getState(final int stateId) {
+ return EnableTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final EnableTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected EnableTableState getInitialState() {
+ return EnableTableState.ENABLE_TABLE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(final EnableTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("Enable-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_ENABLE_TABLE.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.EnableTableStateData.Builder enableTableMsg =
+ MasterProcedureProtos.EnableTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setSkipTableStateCheck(skipTableStateCheck);
+
+ enableTableMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.EnableTableStateData enableTableMsg =
+ MasterProcedureProtos.EnableTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(enableTableMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(enableTableMsg.getTableName());
+ skipTableStateCheck = enableTableMsg.getSkipTableStateCheck();
+ }
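serializeStateData()/deserializeStateData() must round-trip the delimited protobuf message so the
procedure can be rebuilt after a master restart. A small test-style round-trip sketch under that
assumption (the proc variable and stream plumbing are illustrative, not part of the patch):

    // Sketch: round-trip the procedure state through an in-memory stream.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    proc.serializeStateData(bos);
    EnableTableProcedure copy = new EnableTableProcedure();
    copy.deserializeStateData(new ByteArrayInputStream(bos.toByteArray()));
    // copy now carries the same tableName, user info and skipTableStateCheck flag.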
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.ENABLE;
+ }
+
+ /**
+ * Action before any real action of enabling table. Sets the exception in the procedure
+ * instead of throwing it, for backward compatibility with 1.0 clients.
+ * @param env MasterProcedureEnv
+ * @return whether the table passes the necessary checks
+ * @throws IOException
+ */
+ private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
+ boolean canTableBeEnabled = true;
+
+ // Check whether table exists
+ if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ setFailure("master-enable-table", new TableNotFoundException(tableName));
+ canTableBeEnabled = false;
+ } else if (!skipTableStateCheck) {
+ // There could be multiple client requests trying to disable or enable
+ // the table at the same time. Ensure only the first request is honored
+ // After that, no other requests can be accepted until the table reaches
+ // DISABLED or ENABLED.
+ //
+ // Note: in the 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
+ // the state to ENABLING from DISABLED. That implementation predates the table lock.
+ // With the table lock, there is no need to set the state here (it will be set
+ // later on). A quick state check should be enough for us to move forward.
+ TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
+ if (!tsm.getTableState(tableName).equals(TableState.State.DISABLED)) {
+ LOG.info("Table " + tableName + " isn't disabled; skipping enable");
+ setFailure("master-enable-table", new TableNotDisabledException(this.tableName));
+ canTableBeEnabled = false;
+ }
+ }
+
+ // We are done with the check. Future actions in this procedure could be done asynchronously.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+
+ return canTableBeEnabled;
+ }
+
+ /**
+ * Action before enabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preEnable(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Mark table state to Enabling
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @throws IOException
+ */
+ protected static void setTableStateToEnabling(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ // Set the table state to ENABLING in zk.
+ LOG.info("Attempting to enable the table " + tableName);
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ TableState.State.ENABLING);
+ }
+
+ /**
+ * Mark offline regions of the table online with retry
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @param retryRequired whether to retry if the first run failed
+ * @throws IOException
+ */
+ protected static void markRegionsOnline(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final Boolean retryRequired) throws IOException {
+ // This is a best-effort approach to bring all regions of a table online. If we fail to do
+ // that, it is ok that the table has some offline regions; the user can fix it manually.
+
+ // Dev consideration: add a config to control the max number of retries. For now, it is hard-coded.
+ int maxTry = (retryRequired ? 10 : 1);
+ boolean done = false;
+
+ do {
+ try {
+ done = markRegionsOnline(env, tableName);
+ if (done) {
+ break;
+ }
+ maxTry--;
+ } catch (Exception e) {
+ LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e);
+ maxTry--;
+ if (maxTry > 0) {
+ continue; // we still have some retries left, try again.
+ }
+ throw e;
+ }
+ } while (maxTry > 0);
+
+ if (!done) {
+ LOG.warn("Some or all regions of the Table '" + tableName + "' were offline");
+ }
+ }
+
+ /**
+ * Mark offline regions of the table online
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @return whether the operation fully completed or was interrupted.
+ * @throws IOException
+ */
+ private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
+ final MasterServices masterServices = env.getMasterServices();
+ final ServerManager serverManager = masterServices.getServerManager();
+ boolean done = false;
+ // Get the regions of this table. We're done when all listed
+ // regions are online.
+ List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations;
+
+ if (TableName.META_TABLE_NAME.equals(tableName)) {
+ tableRegionsAndLocations =
+ new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper());
+ } else {
+ tableRegionsAndLocations =
+ MetaTableAccessor.getTableRegionsAndLocations(masterServices.getConnection(), tableName);
+ }
+
+ int countOfRegionsInTable = tableRegionsAndLocations.size();
+ Map<HRegionInfo, ServerName> regionsToAssign =
+ regionsToAssignWithServerName(env, tableRegionsAndLocations);
+
+ // need to potentially create some regions for the replicas
+ List<HRegionInfo> unrecordedReplicas =
+ AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet<HRegionInfo>(
+ regionsToAssign.keySet()), masterServices);
+ Map<ServerName, List<HRegionInfo>> srvToUnassignedRegs =
+ assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas,
+ serverManager.getOnlineServersList());
+ if (srvToUnassignedRegs != null) {
+ for (Map.Entry<ServerName, List<HRegionInfo>> entry : srvToUnassignedRegs.entrySet()) {
+ for (HRegionInfo h : entry.getValue()) {
+ regionsToAssign.put(h, entry.getKey());
+ }
+ }
+ }
+
+ int offlineRegionsCount = regionsToAssign.size();
+
+ LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which "
+ + offlineRegionsCount + " are offline.");
+ if (offlineRegionsCount == 0) {
+ return true;
+ }
+
+ List<ServerName> onlineServers = serverManager.createDestinationServersList();
+ Map<ServerName, List<HRegionInfo>> bulkPlan =
+ env.getMasterServices().getAssignmentManager().getBalancer()
+ .retainAssignment(regionsToAssign, onlineServers);
+ if (bulkPlan != null) {
+ LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size()
+ + " server(s), retainAssignment=true");
+
+ BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true);
+ try {
+ if (ba.bulkAssign()) {
+ done = true;
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ }
+ } else {
+ LOG.info("Balancer was unable to find suitable servers for table " + tableName
+ + ", leaving unassigned");
+ }
+ return done;
+ }
+
+ /**
+ * Mark regions of the table offline during recovery
+ * @param env MasterProcedureEnv
+ */
+ private void markRegionsOfflineDuringRecovery(final MasterProcedureEnv env) {
+ try {
+ // This is a best-effort attempt. We will move on even if it does not succeed. We will retry
+ // several times before giving up.
+ DisableTableProcedure.markRegionsOffline(env, tableName, true);
+ } catch (Exception e) {
+ LOG.debug("Failed to offline all regions of table " + tableName + ". Ignoring", e);
+ }
+ }
+
+ /**
+ * Mark table state to Enabled
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ protected static void setTableStateToEnabled(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ // Flip the table to Enabled
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ TableState.State.ENABLED);
+ LOG.info("Table '" + tableName + "' was successfully enabled.");
+ }
+
+ /**
+ * Action after enabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postEnable(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ /**
+ * @param env MasterProcedureEnv
+ * @param regionsInMeta the table's regions listed in meta, with their last-known locations
+ * @return Map of offline regions (neither in transition nor assigned) to their last-known
+ * server names.
+ * @throws IOException
+ */
+ private static Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
+ final MasterProcedureEnv env,
+ final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
+ Map<HRegionInfo, ServerName> regionsToAssign =
+ new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
+ RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates();
+ for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
+ HRegionInfo hri = regionLocation.getFirst();
+ ServerName sn = regionLocation.getSecond();
+ if (regionStates.isRegionOffline(hri)) {
+ regionsToAssign.put(hri, sn);
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Skipping assign for the region " + hri + " during enable table "
+ + hri.getTable() + " because it's already in transition or assigned.");
+ }
+ }
+ }
+ return regionsToAssign;
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case ENABLE_TABLE_PRE_OPERATION:
+ cpHost.preEnableTableHandler(getTableName());
+ break;
+ case ENABLE_TABLE_POST_OPERATION:
+ cpHost.postEnableTableHandler(getTableName());
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
index 76ca094..6928d02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.hbase.master.procedure;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.TableName;
/**
* Procedures that operates on a specific Table (e.g. create, delete, snapshot, ...)
@@ -29,7 +29,9 @@ import org.apache.hadoop.hbase.TableName;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface TableProcedureInterface {
- public enum TableOperationType { CREATE, DELETE, EDIT, READ };
+ public enum TableOperationType {
+ CREATE, DELETE, DISABLE, EDIT, ENABLE, READ,
+ };
/**
* @return the name of the table the procedure is operating on
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index bc97bb9..9bb436e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -36,7 +36,9 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -172,6 +174,18 @@ public class MasterProcedureTestingUtility {
return actualRegCount.get();
}
+ public static void validateTableIsEnabled(final HMaster master, final TableName tableName)
+ throws IOException {
+ TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
+ assertTrue(tsm.getTableState(tableName).equals(TableState.State.ENABLED));
+ }
+
+ public static void validateTableIsDisabled(final HMaster master, final TableName tableName)
+ throws IOException {
+ TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
+ assertTrue(tsm.getTableState(tableName).equals(TableState.State.DISABLED));
+ }
+
public static <TState> void testRecoveryAndDoubleExecution(
final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
final int numSteps, final TState[] states) throws Exception {
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java
new file mode 100644
index 0000000..0537ccc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestDisableTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestDisableTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testDisableTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testDisableTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // Disable the table
+ long procId = procExec.submitProcedure(
+ new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout = 60000)
+ public void testDisableTableMultipleTimes() throws Exception {
+ final TableName tableName = TableName.valueOf("testDisableTableMultipleTimes");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // Disable the table
+ long procId1 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+
+ // Disable the table again - expect failure
+ long procId2 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Disable failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotEnabledException);
+
+ // Disable the table - expect failure from ProcedurePrepareLatch
+ try {
+ final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch();
+
+ long procId3 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, false, prepareLatch));
+ prepareLatch.await();
+ Assert.fail("Disable should throw exception through latch.");
+ } catch (TableNotEnabledException tnee) {
+ // Expected
+ LOG.debug("Disable failed with expected exception.");
+ }
+
+ // Disable the table again with skipping table state check flag (simulate recovery scenario)
+ long procId4 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, true));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId4);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId4);
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Disable procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName,
+ false));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = DisableTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ DisableTableState.values());
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java
new file mode 100644
index 0000000..12c78e8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java
@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestEnableTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestEnableTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testEnableTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testEnableTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // Enable the table
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout=60000, expected=TableNotDisabledException.class)
+ public void testEnableNonDisabledTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testEnableNonExistingTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // Enable the table - expect failure
+ long procId1 = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+
+ ProcedureResult result = procExec.getResult(procId1);
+ assertTrue(result.isFailed());
+ LOG.debug("Enable failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotDisabledException);
+
+ // Enable the table with skipping table state check flag (simulate recovery scenario)
+ long procId2 = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, true));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+
+ // Enable the table - expect failure from ProcedurePrepareLatch
+ final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch();
+ long procId3 = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false, prepareLatch));
+ prepareLatch.await();
+ Assert.fail("Enable should throw exception through latch.");
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Enable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = EnableTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ EnableTableState.values());
+ MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Enable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+
+ int numberOfSteps = EnableTableState.values().length - 2; // failing in the middle of proc
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ EnableTableState.values());
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/57c70f0a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index faf7845..0f6c910 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -37,8 +37,11 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -212,6 +215,79 @@ public class TestMasterFailoverWithProcedures {
}
// ==========================================================================
+ // Test Disable Table
+ // ==========================================================================
+ @Test(timeout=60000)
+ public void testDisableTableWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // It is already covered by TestDisableTableProcedure,
+ // but without the master restart, only the executor/store is restarted.
+ // Without a Master restart we may not find bugs in the procedure code,
+ // like a missing "wait" for resources to be available (e.g. RS).
+ testDisableTableWithFailoverAtStep(
+ DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE.ordinal());
+ }
+
+ private void testDisableTableWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testDisableTableWithFailoverAtStep" + step);
+
+ // create the table
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Disable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, DisableTableState.values());
+
+ MasterProcedureTestingUtility.validateTableIsDisabled(
+ UTIL.getHBaseCluster().getMaster(), tableName);
+ }
+
+ // ==========================================================================
+ // Test Enable Table
+ // ==========================================================================
+ @Test(timeout=60000)
+ public void testEnableTableWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // It is already covered by TestEnableTableProcedure,
+ // but without the master restart, only the executor/store is restarted.
+ // Without a Master restart we may not find bugs in the procedure code,
+ // like a missing "wait" for resources to be available (e.g. RS).
+ testEnableTableWithFailoverAtStep(
+ EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE.ordinal());
+ }
+
+ private void testEnableTableWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testEnableTableWithFailoverAtStep" + step);
+
+ // create the table
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Enable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, EnableTableState.values());
+
+ MasterProcedureTestingUtility.validateTableIsEnabled(
+ UTIL.getHBaseCluster().getMaster(), tableName);
+ }
+
+ // ==========================================================================
// Test Helpers
// ==========================================================================
public static <TState> void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil,
[33/50] [abbrv] hbase git commit: HBASE-13301 Possible memory leak in
BucketCache
Posted by jm...@apache.org.
HBASE-13301 Possible memory leak in BucketCache
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f151444
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f151444
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f151444
Branch: refs/heads/hbase-11339
Commit: 4f151444b58ae85b93f76254961358932e0ffb9b
Parents: 71536bd
Author: zhangduo <zh...@wandoujia.com>
Authored: Sat Apr 11 10:43:43 2015 +0800
Committer: zhangduo <zh...@wandoujia.com>
Committed: Tue Apr 14 17:41:46 2015 +0800
----------------------------------------------------------------------
.../hbase/io/hfile/bucket/BucketCache.java | 182 +++++++++++--------
.../hbase/io/hfile/bucket/CachedEntryQueue.java | 20 +-
.../org/apache/hadoop/hbase/util/IdLock.java | 16 ++
.../hadoop/hbase/io/hfile/CacheTestUtils.java | 6 +-
.../hbase/io/hfile/bucket/TestBucketCache.java | 87 ++++++---
5 files changed, 196 insertions(+), 115 deletions(-)
----------------------------------------------------------------------
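The core of this fix, visible in the hunk below, replaces a containsKey()-then-put() pair with an
atomic putIfAbsent(), so two threads caching the same block cannot both enqueue it. As a
standalone illustration of that ConcurrentMap idiom (a generic sketch, not BucketCache code):

    // Sketch: atomic check-then-insert; only the first caller proceeds.
    ConcurrentMap<String, Long> pending = new ConcurrentHashMap<String, Long>();
    if (pending.putIfAbsent("block-key", 42L) != null) {
      return; // another thread already queued this entry
    }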
http://git-wip-us.apache.org/repos/asf/hbase/blob/4f151444/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 7dda0e6..6a5c884 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -39,6 +39,7 @@ import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@@ -109,13 +110,14 @@ public class BucketCache implements BlockCache, HeapSize {
final static int DEFAULT_WRITER_QUEUE_ITEMS = 64;
// Store/read block data
- IOEngine ioEngine;
+ final IOEngine ioEngine;
// Store the block in this map before writing it to cache
@VisibleForTesting
- Map<BlockCacheKey, RAMQueueEntry> ramCache;
+ final ConcurrentMap<BlockCacheKey, RAMQueueEntry> ramCache;
// In this map, store the block's meta data like offset, length
- private Map<BlockCacheKey, BucketEntry> backingMap;
+ @VisibleForTesting
+ ConcurrentMap<BlockCacheKey, BucketEntry> backingMap;
/**
* Flag if the cache is enabled or not... We shut it off if there are IO
@@ -132,14 +134,14 @@ public class BucketCache implements BlockCache, HeapSize {
* to the BucketCache. It then updates the ramCache and backingMap accordingly.
*/
@VisibleForTesting
- ArrayList<BlockingQueue<RAMQueueEntry>> writerQueues =
+ final ArrayList<BlockingQueue<RAMQueueEntry>> writerQueues =
new ArrayList<BlockingQueue<RAMQueueEntry>>();
@VisibleForTesting
- WriterThread writerThreads[];
+ final WriterThread[] writerThreads;
/** Volatile boolean to track if free space is in process or not */
private volatile boolean freeInProgress = false;
- private Lock freeSpaceLock = new ReentrantLock();
+ private final Lock freeSpaceLock = new ReentrantLock();
private UniqueIndexMap<Integer> deserialiserMap = new UniqueIndexMap<Integer>();
@@ -152,17 +154,16 @@ public class BucketCache implements BlockCache, HeapSize {
/** Cache access count (sequential ID) */
private final AtomicLong accessCount = new AtomicLong(0);
- private final Object[] cacheWaitSignals;
private static final int DEFAULT_CACHE_WAIT_TIME = 50;
// Used in tests now. If the flag is false and the cache speed is very fast,
// bucket cache will skip some blocks when caching. If the flag is true, we
// will wait for blocks to be flushed to the IOEngine for some time when caching
boolean wait_when_cache = false;
- private BucketCacheStats cacheStats = new BucketCacheStats();
+ private final BucketCacheStats cacheStats = new BucketCacheStats();
- private String persistencePath;
- private long cacheCapacity;
+ private final String persistencePath;
+ private final long cacheCapacity;
/** Approximate block size */
private final long blockSize;
@@ -182,7 +183,8 @@ public class BucketCache implements BlockCache, HeapSize {
*
* TODO: We could extend IdLock to an IdReadWriteLock for better concurrency.
*/
- private IdLock offsetLock = new IdLock();
+ @VisibleForTesting
+ final IdLock offsetLock = new IdLock();
private final ConcurrentIndex<String, BlockCacheKey> blocksByHFile =
new ConcurrentIndex<String, BlockCacheKey>(new Comparator<BlockCacheKey>() {
@@ -216,7 +218,6 @@ public class BucketCache implements BlockCache, HeapSize {
throws FileNotFoundException, IOException {
this.ioEngine = getIOEngineFromName(ioEngineName, capacity);
this.writerThreads = new WriterThread[writerThreadNum];
- this.cacheWaitSignals = new Object[writerThreadNum];
long blockNumCapacity = capacity / blockSize;
if (blockNumCapacity >= Integer.MAX_VALUE) {
// Enough for about 32TB of cache!
@@ -231,7 +232,6 @@ public class BucketCache implements BlockCache, HeapSize {
bucketAllocator = new BucketAllocator(capacity, bucketSizes);
for (int i = 0; i < writerThreads.length; ++i) {
writerQueues.add(new ArrayBlockingQueue<RAMQueueEntry>(writerQLen));
- this.cacheWaitSignals[i] = new Object();
}
assert writerQueues.size() == writerThreads.length;
@@ -252,7 +252,7 @@ public class BucketCache implements BlockCache, HeapSize {
final String threadName = Thread.currentThread().getName();
this.cacheEnabled = true;
for (int i = 0; i < writerThreads.length; ++i) {
- writerThreads[i] = new WriterThread(writerQueues.get(i), i);
+ writerThreads[i] = new WriterThread(writerQueues.get(i));
writerThreads[i].setName(threadName + "-BucketCacheWriter-" + i);
writerThreads[i].setDaemon(true);
}
@@ -344,38 +344,39 @@ public class BucketCache implements BlockCache, HeapSize {
* @param inMemory if block is in-memory
* @param wait if true, blocking wait when queue is full
*/
- public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem,
- boolean inMemory, boolean wait) {
- if (!cacheEnabled)
+ public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
+ boolean wait) {
+ if (!cacheEnabled) {
return;
+ }
- if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey))
+ if (backingMap.containsKey(cacheKey)) {
return;
+ }
/*
- * Stuff the entry into the RAM cache so it can get drained to the
- * persistent store
+ * Stuff the entry into the RAM cache so it can get drained to the persistent store
*/
- RAMQueueEntry re = new RAMQueueEntry(cacheKey, cachedItem,
- accessCount.incrementAndGet(), inMemory);
- ramCache.put(cacheKey, re);
+ RAMQueueEntry re =
+ new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
+ if (ramCache.putIfAbsent(cacheKey, re) != null) {
+ return;
+ }
int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
- boolean successfulAddition = bq.offer(re);
- if (!successfulAddition && wait) {
- synchronized (cacheWaitSignals[queueNum]) {
- try {
- successfulAddition = bq.offer(re);
- if (!successfulAddition) cacheWaitSignals[queueNum].wait(DEFAULT_CACHE_WAIT_TIME);
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- }
+ boolean successfulAddition = false;
+ if (wait) {
+ try {
+ successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
}
+ } else {
successfulAddition = bq.offer(re);
}
if (!successfulAddition) {
- ramCache.remove(cacheKey);
- failedBlockAdditions.incrementAndGet();
+ ramCache.remove(cacheKey);
+ failedBlockAdditions.incrementAndGet();
} else {
this.blockNumber.incrementAndGet();
this.heapSize.addAndGet(cachedItem.heapSize());
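The hunk above replaces the hand-rolled monitor wait around the writer queues (the removed cacheWaitSignals array) with BlockingQueue's built-in timed offer, and swaps ramCache.put for putIfAbsent so two racing callers cannot enqueue the same block twice. A minimal sketch of the timed-offer idiom, assuming only the JDK (class and variable names here are illustrative, not HBase APIs):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedOfferExample {
  private static final int CACHE_WAIT_TIME_MS = 50; // mirrors DEFAULT_CACHE_WAIT_TIME above

  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> queue = new ArrayBlockingQueue<String>(1);
    queue.put("first"); // fill the queue so the next offer has to wait

    boolean added;
    try {
      // Blocks up to the timeout waiting for space, then gives up; no
      // external monitor object or notifyAll() round-trip is needed.
      added = queue.offer("second", CACHE_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore interrupt status, as the patch does
      added = false;
    }
    System.out.println("added = " + added); // false: the queue stayed full
  }
}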
@@ -394,11 +395,14 @@ public class BucketCache implements BlockCache, HeapSize {
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
- if (!cacheEnabled)
+ if (!cacheEnabled) {
return null;
+ }
RAMQueueEntry re = ramCache.get(key);
if (re != null) {
- if (updateCacheMetrics) cacheStats.hit(caching);
+ if (updateCacheMetrics) {
+ cacheStats.hit(caching);
+ }
re.access(accessCount.incrementAndGet());
return re.getData();
}
@@ -408,6 +412,9 @@ public class BucketCache implements BlockCache, HeapSize {
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
+ // We cannot read here, even if backingMap does contain the given key, because the entry's
+ // offset may have changed. If we locked on the BlockCacheKey instead of the offset, we
+ // could only check existence here.
if (bucketEntry.equals(backingMap.get(key))) {
int len = bucketEntry.getLength();
ByteBuffer bb = ByteBuffer.allocate(len);
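The new comment in the hunk above describes a check-then-act guard: look up the block's offset, take a lock keyed by that offset, then re-read the backingMap entry and compare, because the block may have been evicted and its offset reused while waiting for the lock. The same double-check, sketched with a plain per-offset lock map instead of HBase's IdLock (all names here are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

public class OffsetRecheckExample {
  static final ConcurrentMap<String, Long> backingMap = new ConcurrentHashMap<String, Long>();
  static final ConcurrentMap<Long, ReentrantLock> offsetLocks =
      new ConcurrentHashMap<Long, ReentrantLock>();

  static Long readBlock(String key) {
    Long offset = backingMap.get(key);
    if (offset == null) {
      return null;
    }
    ReentrantLock lock = offsetLocks.computeIfAbsent(offset, o -> new ReentrantLock());
    lock.lock();
    try {
      // Re-check after locking: the entry may have been evicted and the
      // offset handed to a different block while we waited for the lock.
      if (offset.equals(backingMap.get(key))) {
        return offset; // safe to read the bytes at this offset
      }
      return null; // lost the race; the caller treats it as a miss
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    backingMap.put("block-1", 4096L);
    System.out.println(readBlock("block-1")); // 4096
    System.out.println(readBlock("block-2")); // null
  }
}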
@@ -438,13 +445,27 @@ public class BucketCache implements BlockCache, HeapSize {
}
}
}
- if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
+ if (!repeat && updateCacheMetrics) {
+ cacheStats.miss(caching);
+ }
return null;
}
+ @VisibleForTesting
+ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber) {
+ bucketAllocator.freeBlock(bucketEntry.offset());
+ realCacheSize.addAndGet(-1 * bucketEntry.getLength());
+ blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
+ if (decrementBlockNumber) {
+ this.blockNumber.decrementAndGet();
+ }
+ }
+
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
- if (!cacheEnabled) return false;
+ if (!cacheEnabled) {
+ return false;
+ }
RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
if (removedBlock != null) {
this.blockNumber.decrementAndGet();
@@ -462,13 +483,8 @@ public class BucketCache implements BlockCache, HeapSize {
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
- if (bucketEntry.equals(backingMap.remove(cacheKey))) {
- bucketAllocator.freeBlock(bucketEntry.offset());
- realCacheSize.addAndGet(-1 * bucketEntry.getLength());
- blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
- if (removedBlock == null) {
- this.blockNumber.decrementAndGet();
- }
+ if (backingMap.remove(cacheKey, bucketEntry)) {
+ blockEvicted(cacheKey, bucketEntry, removedBlock == null);
} else {
return false;
}
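The eviction path above now leans on ConcurrentMap.remove(key, value), which removes the mapping only if the key is still bound to the exact value that was checked, making "remove the entry I just verified" a single atomic step instead of a remove-then-compare. A small standalone illustration:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConditionalRemoveExample {
  public static void main(String[] args) {
    ConcurrentMap<String, String> map = new ConcurrentHashMap<String, String>();
    map.put("key", "entry-A");

    // Fails: the map does not hold the value we expected, so nothing changes.
    System.out.println(map.remove("key", "entry-B")); // false

    // Succeeds only because key -> entry-A is still the current mapping.
    System.out.println(map.remove("key", "entry-A")); // true
    System.out.println(map.isEmpty());                // true
  }
}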
@@ -705,13 +721,10 @@ public class BucketCache implements BlockCache, HeapSize {
@VisibleForTesting
class WriterThread extends HasThread {
private final BlockingQueue<RAMQueueEntry> inputQueue;
- private final int threadNO;
private volatile boolean writerEnabled = true;
- WriterThread(BlockingQueue<RAMQueueEntry> queue, int threadNO) {
- super();
+ WriterThread(BlockingQueue<RAMQueueEntry> queue) {
this.inputQueue = queue;
- this.threadNO = threadNO;
}
// Used for test
@@ -728,9 +741,6 @@ public class BucketCache implements BlockCache, HeapSize {
try {
// Blocks
entries = getRAMQueueEntries(inputQueue, entries);
- synchronized (cacheWaitSignals[threadNO]) {
- cacheWaitSignals[threadNO].notifyAll();
- }
} catch (InterruptedException ie) {
if (!cacheEnabled) break;
}
@@ -755,7 +765,9 @@ public class BucketCache implements BlockCache, HeapSize {
*/
@VisibleForTesting
void doDrain(final List<RAMQueueEntry> entries) throws InterruptedException {
- if (entries.isEmpty()) return;
+ if (entries.isEmpty()) {
+ return;
+ }
// This method is a little hard to follow. We run through the passed in entries and for each
// successful add, we add a non-null BucketEntry to the below bucketEntries. Later we must
// do cleanup making sure we've cleared ramCache of all entries regardless of whether we
@@ -830,6 +842,21 @@ public class BucketCache implements BlockCache, HeapSize {
RAMQueueEntry ramCacheEntry = ramCache.remove(key);
if (ramCacheEntry != null) {
heapSize.addAndGet(-1 * entries.get(i).getData().heapSize());
+ } else if (bucketEntries[i] != null) {
+ // Block should have already been evicted. Remove it and free space.
+ IdLock.Entry lockEntry = null;
+ try {
+ lockEntry = offsetLock.getLockEntry(bucketEntries[i].offset());
+ if (backingMap.remove(key, bucketEntries[i])) {
+ blockEvicted(key, bucketEntries[i], false);
+ }
+ } catch (IOException e) {
+ LOG.warn("failed to free space for " + key, e);
+ } finally {
+ if (lockEntry != null) {
+ offsetLock.releaseLockEntry(lockEntry);
+ }
+ }
}
}
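The new else-if branch above covers a subtle case in the drain loop: by the time a writer thread finishes persisting a block, the block may already have been evicted from ramCache, so the freshly published backingMap entry has to be rolled back and its bucket space freed. The shape of that publish-then-reconcile pattern, sketched with plain collections and made-up names:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class DrainReconcileExample {
  static final ConcurrentMap<String, byte[]> ramCache = new ConcurrentHashMap<String, byte[]>();
  static final ConcurrentMap<String, Long> backingMap = new ConcurrentHashMap<String, Long>();

  public static void main(String[] args) {
    ramCache.put("blk-1", new byte[8]);
    // "blk-2" was evicted from ramCache while it sat in the writer queue.
    List<String> drained = Arrays.asList("blk-1", "blk-2");

    for (String key : drained) {
      long offset = persist(key);  // pretend write to the IOEngine
      backingMap.put(key, offset); // publish the persisted entry
      if (ramCache.remove(key) == null) {
        // The block was evicted while in flight: undo the publish and free
        // the space, mirroring the blockEvicted() call in the patch.
        if (backingMap.remove(key, offset)) {
          System.out.println("freed stale block " + key);
        }
      }
    }
    System.out.println(backingMap.keySet()); // [blk-1]
  }

  static long persist(String key) {
    return key.hashCode(); // stand-in for a real bucket offset
  }
}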
@@ -1055,23 +1082,35 @@ public class BucketCache implements BlockCache, HeapSize {
* up the long. Doubt we'll see devices this big for ages. Offsets are divided
* by 256. So 5 bytes gives us 256TB or so.
*/
- static class BucketEntry implements Serializable, Comparable<BucketEntry> {
+ static class BucketEntry implements Serializable {
private static final long serialVersionUID = -6741504807982257534L;
+
+ // access counter comparator, descending order
+ static final Comparator<BucketEntry> COMPARATOR = new Comparator<BucketCache.BucketEntry>() {
+
+ @Override
+ public int compare(BucketEntry o1, BucketEntry o2) {
+ long accessCounter1 = o1.accessCounter;
+ long accessCounter2 = o2.accessCounter;
+ return accessCounter1 < accessCounter2 ? 1 : accessCounter1 == accessCounter2 ? 0 : -1;
+ }
+ };
+
private int offsetBase;
private int length;
private byte offset1;
byte deserialiserIndex;
- private volatile long accessTime;
+ private volatile long accessCounter;
private BlockPriority priority;
/**
* Time this block was cached. Presumes we are created just before we are added to the cache.
*/
private final long cachedTime = System.nanoTime();
- BucketEntry(long offset, int length, long accessTime, boolean inMemory) {
+ BucketEntry(long offset, int length, long accessCounter, boolean inMemory) {
setOffset(offset);
this.length = length;
- this.accessTime = accessTime;
+ this.accessCounter = accessCounter;
if (inMemory) {
this.priority = BlockPriority.MEMORY;
} else {
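The COMPARATOR introduced in the hunk above orders entries by access counter, descending, replacing BucketEntry's old Comparable implementation (and the equals() override that was inconsistent with it, removed further down). The hand-written ternary is equivalent to comparing the counters in reverse with Long.compare; a compact check with made-up values:

import java.util.Arrays;
import java.util.Comparator;

public class DescendingCounterExample {
  static final class Entry {
    final long accessCounter;
    Entry(long counter) { this.accessCounter = counter; }
  }

  // Descending by access counter: same ordering as the ternary in the patch.
  static final Comparator<Entry> BY_COUNTER_DESC = new Comparator<Entry>() {
    @Override
    public int compare(Entry a, Entry b) {
      return Long.compare(b.accessCounter, a.accessCounter);
    }
  };

  public static void main(String[] args) {
    Entry[] entries = { new Entry(3), new Entry(9), new Entry(1) };
    Arrays.sort(entries, BY_COUNTER_DESC);
    for (Entry e : entries) {
      System.out.println(e.accessCounter); // prints 9, 3, 1: most-accessed first
    }
  }
}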
@@ -1110,10 +1149,10 @@ public class BucketCache implements BlockCache, HeapSize {
}
/**
- * Block has been accessed. Update its local access time.
+ * Block has been accessed. Update its local access counter.
*/
- public void access(long accessTime) {
- this.accessTime = accessTime;
+ public void access(long accessCounter) {
+ this.accessCounter = accessCounter;
if (this.priority == BlockPriority.SINGLE) {
this.priority = BlockPriority.MULTI;
}
@@ -1123,17 +1162,6 @@ public class BucketCache implements BlockCache, HeapSize {
return this.priority;
}
- @Override
- public int compareTo(BucketEntry that) {
- if(this.accessTime == that.accessTime) return 0;
- return this.accessTime < that.accessTime ? 1 : -1;
- }
-
- @Override
- public boolean equals(Object that) {
- return this == that;
- }
-
public long getCachedTime() {
return cachedTime;
}
@@ -1204,14 +1232,14 @@ public class BucketCache implements BlockCache, HeapSize {
static class RAMQueueEntry {
private BlockCacheKey key;
private Cacheable data;
- private long accessTime;
+ private long accessCounter;
private boolean inMemory;
- public RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessTime,
+ public RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessCounter,
boolean inMemory) {
this.key = bck;
this.data = data;
- this.accessTime = accessTime;
+ this.accessCounter = accessCounter;
this.inMemory = inMemory;
}
@@ -1223,8 +1251,8 @@ public class BucketCache implements BlockCache, HeapSize {
return key;
}
- public void access(long accessTime) {
- this.accessTime = accessTime;
+ public void access(long accessCounter) {
+ this.accessCounter = accessCounter;
}
public BucketEntry writeToCache(final IOEngine ioEngine,
@@ -1236,7 +1264,7 @@ public class BucketCache implements BlockCache, HeapSize {
// This cacheable thing can't be serialized...
if (len == 0) return null;
long offset = bucketAllocator.allocateBlock(len);
- BucketEntry bucketEntry = new BucketEntry(offset, len, accessTime, inMemory);
+ BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
try {
if (data instanceof HFileBlock) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/4f151444/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
index b6954bb..0e33a56 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
@@ -54,23 +54,23 @@ public class CachedEntryQueue {
*/
public CachedEntryQueue(long maxSize, long blockSize) {
int initialSize = (int) (maxSize / blockSize);
- if (initialSize == 0)
+ if (initialSize == 0) {
initialSize++;
- queue = MinMaxPriorityQueue
- .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
- public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
- Entry<BlockCacheKey, BucketEntry> entry2) {
- return entry1.getValue().compareTo(entry2.getValue());
- }
+ }
+ queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
+
+ public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
+ Entry<BlockCacheKey, BucketEntry> entry2) {
+ return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue());
+ }
- }).expectedSize(initialSize).create();
+ }).expectedSize(initialSize).create();
cacheSize = 0;
this.maxSize = maxSize;
}
/**
* Attempt to add the specified entry to this queue.
- *
* <p>
* If the queue is smaller than the max size, or if the specified element is
* ordered after the smallest element in the queue, the element will be added
@@ -83,7 +83,7 @@ public class CachedEntryQueue {
cacheSize += entry.getValue().getLength();
} else {
BucketEntry head = queue.peek().getValue();
- if (entry.getValue().compareTo(head) > 0) {
+ if (BucketEntry.COMPARATOR.compare(entry.getValue(), head) > 0) {
cacheSize += entry.getValue().getLength();
cacheSize -= head.getLength();
if (cacheSize > maxSize) {
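CachedEntryQueue now routes both of its comparisons through BucketEntry.COMPARATOR instead of the entries' natural ordering. For readers unfamiliar with Guava's MinMaxPriorityQueue that backs it, a minimal standalone usage, assuming Guava is on the classpath (the values are made up):

import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;

public class MinMaxQueueExample {
  public static void main(String[] args) {
    // A double-ended priority queue: both the least and the greatest element
    // under the comparator are reachable in constant time.
    MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue
        .orderedBy(Ordering.<Integer>natural())
        .expectedSize(4)
        .create();
    for (int i : new int[] { 5, 1, 9, 3 }) {
      queue.add(i);
    }
    System.out.println(queue.peekFirst()); // 1, the smallest
    System.out.println(queue.peekLast());  // 9, the largest
    queue.pollLast();                      // drop the largest, as an eviction queue might
    System.out.println(queue.peekLast());  // 5
  }
}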
http://git-wip-us.apache.org/repos/asf/hbase/blob/4f151444/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index b9d0983..fedf951 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -25,6 +25,8 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Allows multiple concurrent clients to lock on a numeric id with a minimal
* memory overhead. The intended usage is as follows:
@@ -119,4 +121,18 @@ public class IdLock {
assert map.size() == 0;
}
+ @VisibleForTesting
+ public void waitForWaiters(long id, int numWaiters) throws InterruptedException {
+ for (Entry entry;;) {
+ entry = map.get(id);
+ if (entry != null) {
+ synchronized (entry) {
+ if (entry.numWaiters >= numWaiters) {
+ return;
+ }
+ }
+ }
+ Thread.sleep(100);
+ }
+ }
}
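waitForWaiters gives tests a way to block until a given number of threads are parked on an id's lock entry, by polling the internal map every 100 ms. The same poll-until-visible idiom in isolation, with made-up names:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class PollForWaitersExample {
  public static void main(String[] args) throws InterruptedException {
    final AtomicInteger waiters = new AtomicInteger();
    final CountDownLatch release = new CountDownLatch(1);

    Thread waiter = new Thread(new Runnable() {
      @Override
      public void run() {
        waiters.incrementAndGet(); // register before parking
        try {
          release.await();         // park until released
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    });
    waiter.start();

    // Poll until the expected number of waiters is visible, the same loop
    // shape as IdLock.waitForWaiters with its 100 ms sleep.
    while (waiters.get() < 1) {
      Thread.sleep(100);
    }
    release.countDown(); // now that the waiter is registered, release it
    waiter.join();
    System.out.println("observed the waiter, then released it");
  }
}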
http://git-wip-us.apache.org/repos/asf/hbase/blob/4f151444/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 5ef8cf0..b0a2ba2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -247,11 +247,11 @@ public class CacheTestUtils {
assertTrue(toBeTested.getStats().getEvictedCount() > 0);
}
- private static class ByteArrayCacheable implements Cacheable {
+ public static class ByteArrayCacheable implements Cacheable {
- static final CacheableDeserializer<Cacheable> blockDeserializer =
+ static final CacheableDeserializer<Cacheable> blockDeserializer =
new CacheableDeserializer<Cacheable>() {
-
+
@Override
public Cacheable deserialize(ByteBuffer b) throws IOException {
int len = b.getInt();
http://git-wip-us.apache.org/repos/asf/hbase/blob/4f151444/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index d29be01..99f5657 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.io.hfile.bucket;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -27,13 +28,14 @@ import java.util.Arrays;
import java.util.List;
import java.util.Random;
-import org.apache.hadoop.hbase.testclassification.IOTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.IdLock;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -44,24 +46,23 @@ import org.junit.runners.Parameterized;
/**
* Basic test of BucketCache.Puts and gets.
* <p>
- * Tests will ensure that blocks' data correctness under several threads
- * concurrency
+ * Tests ensure the correctness of block data under concurrent access from multiple threads
*/
@RunWith(Parameterized.class)
-@Category({IOTests.class, SmallTests.class})
+@Category({ IOTests.class, SmallTests.class })
public class TestBucketCache {
private static final Random RAND = new Random();
- @Parameterized.Parameters(name="{index}: blockSize={0}, bucketSizes={1}")
+ @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}")
public static Iterable<Object[]> data() {
return Arrays.asList(new Object[][] {
- { 8192, null }, // TODO: why is 8k the default blocksize for these tests?
- { 16 * 1024, new int[] {
- 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
- 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
- 128 * 1024 + 1024 } }
- });
+ { 8192, null }, // TODO: why is 8k the default blocksize for these tests?
+ {
+ 16 * 1024,
+ new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
+ 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
+ 128 * 1024 + 1024 } } });
}
@Parameterized.Parameter(0)
@@ -76,7 +77,7 @@ public class TestBucketCache {
final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS;
final int NUM_THREADS = 1000;
final int NUM_QUERIES = 10000;
-
+
final long capacitySize = 32 * 1024 * 1024;
final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS;
final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS;
@@ -86,16 +87,16 @@ public class TestBucketCache {
private class MockedBucketCache extends BucketCache {
public MockedBucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
- int writerThreads, int writerQLen, String persistencePath)
- throws FileNotFoundException, IOException {
+ int writerThreads, int writerQLen, String persistencePath) throws FileNotFoundException,
+ IOException {
super(ioEngineName, capacity, blockSize, bucketSizes, writerThreads, writerQLen,
- persistencePath);
+ persistencePath);
super.wait_when_cache = true;
}
@Override
- public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf,
- boolean inMemory, boolean cacheDataInL1) {
+ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
+ boolean cacheDataInL1) {
if (super.getBlock(cacheKey, true, false, true) != null) {
throw new RuntimeException("Cached an already cached block");
}
@@ -113,8 +114,9 @@ public class TestBucketCache {
@Before
public void setup() throws FileNotFoundException, IOException {
- cache = new MockedBucketCache(ioEngineName, capacitySize, constructedBlockSize,
- constructedBlockSizes, writeThreads, writerQLen, persistencePath);
+ cache =
+ new MockedBucketCache(ioEngineName, capacitySize, constructedBlockSize,
+ constructedBlockSizes, writeThreads, writerQLen, persistencePath);
}
@After
@@ -142,7 +144,7 @@ public class TestBucketCache {
// Fill the allocated extents by choosing a random blocksize. Continues selecting blocks until
// the cache is completely filled.
List<Integer> tmp = new ArrayList<Integer>(BLOCKSIZES);
- for (int i = 0; !full; i++) {
+ while (!full) {
Integer blockSize = null;
try {
blockSize = randFrom(tmp);
@@ -156,9 +158,7 @@ public class TestBucketCache {
for (Integer blockSize : BLOCKSIZES) {
BucketSizeInfo bucketSizeInfo = mAllocator.roundUpToBucketSizeInfo(blockSize);
IndexStatistics indexStatistics = bucketSizeInfo.statistics();
- assertEquals(
- "unexpected freeCount for " + bucketSizeInfo,
- 0, indexStatistics.freeCount());
+ assertEquals("unexpected freeCount for " + bucketSizeInfo, 0, indexStatistics.freeCount());
}
for (long offset : allocations) {
@@ -182,4 +182,41 @@ public class TestBucketCache {
cache.stopWriterThreads();
CacheTestUtils.testHeapSizeChanges(cache, BLOCK_SIZE);
}
-}
\ No newline at end of file
+
+ // BucketCache.cacheBlock is asynchronous: it first adds the block to ramCache and the write
+ // queue; writer threads then flush it to the bucket and put a reference entry in backingMap.
+ private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey,
+ Cacheable block) throws InterruptedException {
+ cache.cacheBlock(cacheKey, block);
+ while (!cache.backingMap.containsKey(cacheKey)) {
+ Thread.sleep(100);
+ }
+ }
+
+ @Test
+ public void testMemoryLeak() throws Exception {
+ final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
+ cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
+ new byte[10]));
+ long lockId = cache.backingMap.get(cacheKey).offset();
+ IdLock.Entry lockEntry = cache.offsetLock.getLockEntry(lockId);
+ Thread evictThread = new Thread("evict-block") {
+
+ @Override
+ public void run() {
+ cache.evictBlock(cacheKey);
+ }
+
+ };
+ evictThread.start();
+ cache.offsetLock.waitForWaiters(lockId, 1);
+ cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
+ cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
+ new byte[10]));
+ cache.offsetLock.releaseLockEntry(lockEntry);
+ evictThread.join();
+ assertEquals(1L, cache.getBlockCount());
+ assertTrue(cache.getCurrentSize() > 0L);
+ assertTrue("We should have a block!", cache.iterator().hasNext());
+ }
+}
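testMemoryLeak stages a deterministic interleaving rather than hoping to hit the race: the test thread takes the offset lock itself, starts an evictor that must block on that lock, mutates the cache while the evictor is provably parked (via waitForWaiters), and only then releases the lock, asserting that the late eviction does not clobber the newer block. The hold / start-contender / mutate / release ordering distilled to plain locks (illustrative, not the HBase API):

import java.util.concurrent.locks.ReentrantLock;

public class StagedRaceExample {
  public static void main(String[] args) throws InterruptedException {
    final ReentrantLock lock = new ReentrantLock();

    lock.lock(); // step 1: the test thread takes the contested lock first
    Thread contender = new Thread(new Runnable() {
      @Override
      public void run() {
        lock.lock(); // step 2: parks until the test thread releases
        try {
          System.out.println("contender ran strictly after the release");
        } finally {
          lock.unlock();
        }
      }
    }, "evict-block");
    contender.start();

    // step 3: perform the racy state change while the contender is parked
    System.out.println("mutating shared state under the held lock");

    lock.unlock(); // step 4: let the contender in, then observe the outcome
    contender.join();
  }
}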
[03/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core
framework
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-protocol/src/main/protobuf/Procedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Procedure.proto b/hbase-protocol/src/main/protobuf/Procedure.proto
new file mode 100644
index 0000000..232c290
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Procedure.proto
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "ProcedureProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "ErrorHandling.proto";
+
+enum ProcedureState {
+ INITIALIZING = 1; // Procedure in construction, not yet added to the executor
+ RUNNABLE = 2; // Procedure added to the executor, and ready to be executed
+ WAITING = 3; // The procedure is waiting on children to be completed
+ WAITING_TIMEOUT = 4; // The procedure is waiting on a timeout or an external event
+ ROLLEDBACK = 5; // The procedure failed and was rolled back
+ FINISHED = 6; // The procedure execution is completed; may need a rollback if it failed.
+}
+
+/**
+ * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
+ */
+message Procedure {
+ // internal "static" state
+ required string class_name = 1; // full classname to be able to instantiate the procedure
+ optional uint64 parent_id = 2; // parent if not a root-procedure otherwise not set
+ required uint64 proc_id = 3;
+ required uint64 start_time = 4;
+ optional string owner = 5;
+
+ // internal "runtime" state
+ required ProcedureState state = 6;
+ repeated uint32 stack_id = 7; // stack indices in case the procedure was running
+ required uint64 last_update = 8;
+ optional uint32 timeout = 9;
+
+ // user state/results
+ optional ForeignExceptionMessage exception = 10;
+ optional bytes result = 11; // opaque (user) result structure
+ optional bytes state_data = 12; // opaque (user) procedure internal-state
+}
+
+/**
+ * SequentialProcedure data
+ */
+message SequentialProcedureData {
+ required bool executed = 1;
+}
+
+/**
+ * StateMachineProcedure data
+ */
+message StateMachineProcedureData {
+ repeated uint32 state = 1;
+}
+
+/**
+ * Procedure WAL header
+ */
+message ProcedureWALHeader {
+ required uint32 version = 1;
+ required uint32 type = 2;
+ required uint64 log_id = 3;
+ required uint64 min_proc_id = 4;
+}
+
+/**
+ * Procedure WAL trailer
+ */
+message ProcedureWALTrailer {
+ required uint32 version = 1;
+ required uint64 tracker_pos = 2;
+}
+
+message ProcedureStoreTracker {
+ message TrackerNode {
+ required uint64 start_id = 1;
+ repeated uint64 updated = 2;
+ repeated uint64 deleted = 3;
+ }
+
+ repeated TrackerNode node = 1;
+}
+
+message ProcedureWALEntry {
+ enum Type {
+ EOF = 1;
+ INIT = 2;
+ INSERT = 3;
+ UPDATE = 4;
+ DELETE = 5;
+ COMPACT = 6;
+ }
+
+ required Type type = 1;
+ repeated Procedure procedure = 2;
+ optional uint64 proc_id = 3;
+}
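Since the file sets java_outer_classname = "ProcedureProtos", protoc generates a builder per message under that wrapper class. A sketch of building and round-tripping one Procedure record, assuming the generated ProcedureProtos classes are on the classpath (the field values are made up):

import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure;
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;

public class ProcedureProtoExample {
  public static void main(String[] args) throws Exception {
    Procedure proc = Procedure.newBuilder()
        .setClassName("org.example.MyProcedure") // required: class to re-instantiate on recovery
        .setProcId(42L)
        .setStartTime(System.currentTimeMillis())
        .setState(ProcedureState.RUNNABLE)
        .setLastUpdate(System.currentTimeMillis())
        .build(); // build() enforces the required fields above

    byte[] wire = proc.toByteArray();       // what a ProcedureStore would persist
    Procedure parsed = Procedure.parseFrom(wire);
    System.out.println(parsed.getProcId()); // 42
    System.out.println(parsed.getState());  // RUNNABLE
  }
}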
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f7ae209..ce49c04 100644
--- a/pom.xml
+++ b/pom.xml
@@ -56,6 +56,7 @@
<module>hbase-client</module>
<module>hbase-hadoop-compat</module>
<module>hbase-common</module>
+ <module>hbase-procedure</module>
<module>hbase-it</module>
<module>hbase-examples</module>
<module>hbase-prefix-tree</module>
@@ -871,7 +872,7 @@
</fileMapper>
</fileMappers>
<outputDir>${basedir}/target/asciidoc</outputDir>
- </transformationSet>
+ </transformationSet>
</transformationSets>
</configuration>
</plugin>
@@ -1014,7 +1015,7 @@
<plugin>
<groupId>org.asciidoctor</groupId>
<artifactId>asciidoctor-maven-plugin</artifactId>
- <version>1.5.2</version>
+ <version>1.5.2</version>
<inherited>false</inherited>
<dependencies>
<dependency>
@@ -1034,10 +1035,10 @@
</configuration>
<executions>
<execution>
- <id>output-html</id>
+ <id>output-html</id>
<phase>site</phase>
<goals>
- <goal>process-asciidoc</goal>
+ <goal>process-asciidoc</goal>
</goals>
<configuration>
<attributes>
@@ -1191,6 +1192,7 @@
Modules are pretty heavy-weight things, so doing this work isn't too bad. -->
<server.test.jar>hbase-server-${project.version}-tests.jar</server.test.jar>
<common.test.jar>hbase-common-${project.version}-tests.jar</common.test.jar>
+ <procedure.test.jar>hbase-procedure-${project.version}-tests.jar</procedure.test.jar>
<it.test.jar>hbase-it-${project.version}-tests.jar</it.test.jar>
<annotations.test.jar>hbase-annotations-${project.version}-tests.jar</annotations.test.jar>
<surefire.version>2.18</surefire.version>
@@ -1262,6 +1264,17 @@
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-procedure</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-procedure</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop-compat</artifactId>
<version>${project.version}</version>
</dependency>
[30/50] [abbrv] hbase git commit: HBASE-13419 Thrift gateway should
propagate text from exception causes (Michael Muller)
Posted by jm...@apache.org.
HBASE-13419 Thrift gateway should propagate text from exception causes (Michael Muller)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/679e0e8d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/679e0e8d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/679e0e8d
Branch: refs/heads/hbase-11339
Commit: 679e0e8d2efed297e5b44d17c69952372080b328
Parents: e75c620
Author: tedyu <yu...@gmail.com>
Authored: Mon Apr 13 09:58:04 2015 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Mon Apr 13 09:58:04 2015 -0700
----------------------------------------------------------------------
.../hadoop/hbase/thrift/ThriftServerRunner.java | 75 ++++++++++----------
1 file changed, 38 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/679e0e8d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index 617fab6..4e4ade3 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -130,6 +130,7 @@ import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
import com.google.common.base.Joiner;
+import com.google.common.base.Throwables;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
@@ -755,7 +756,7 @@ public class ThriftServerRunner implements Runnable {
getAdmin().enableTable(getTableName(tableName));
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -765,7 +766,7 @@ public class ThriftServerRunner implements Runnable {
getAdmin().disableTable(getTableName(tableName));
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -775,7 +776,7 @@ public class ThriftServerRunner implements Runnable {
return this.connectionCache.getAdmin().isTableEnabled(getTableName(tableName));
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -788,7 +789,7 @@ public class ThriftServerRunner implements Runnable {
((HBaseAdmin) getAdmin()).compact(getBytes(tableNameOrRegionName));
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -801,7 +802,7 @@ public class ThriftServerRunner implements Runnable {
((HBaseAdmin) getAdmin()).majorCompact(getBytes(tableNameOrRegionName));
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -816,7 +817,7 @@ public class ThriftServerRunner implements Runnable {
return list;
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -849,7 +850,7 @@ public class ThriftServerRunner implements Runnable {
return Collections.emptyList();
} catch (IOException e){
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -894,7 +895,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.cellFromHBase(result.rawCells());
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -937,7 +938,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.cellFromHBase(result.rawCells());
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -981,7 +982,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.cellFromHBase(result.rawCells());
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1038,7 +1039,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.rowResultFromHBase(result);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1103,7 +1104,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.rowResultFromHBase(result);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1135,7 +1136,7 @@ public class ThriftServerRunner implements Runnable {
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1157,7 +1158,7 @@ public class ThriftServerRunner implements Runnable {
table.delete(delete);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1178,10 +1179,10 @@ public class ThriftServerRunner implements Runnable {
getAdmin().createTable(desc);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
} catch (IllegalArgumentException e) {
LOG.warn(e.getMessage(), e);
- throw new IllegalArgument(e.getMessage());
+ throw new IllegalArgument(Throwables.getStackTraceAsString(e));
}
}
@@ -1202,7 +1203,7 @@ public class ThriftServerRunner implements Runnable {
getAdmin().deleteTable(tableName);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1260,10 +1261,10 @@ public class ThriftServerRunner implements Runnable {
table.put(put);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
} catch (IllegalArgumentException e) {
LOG.warn(e.getMessage(), e);
- throw new IllegalArgument(e.getMessage());
+ throw new IllegalArgument(Throwables.getStackTraceAsString(e));
}
}
@@ -1331,10 +1332,10 @@ public class ThriftServerRunner implements Runnable {
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
} catch (IllegalArgumentException e) {
LOG.warn(e.getMessage(), e);
- throw new IllegalArgument(e.getMessage());
+ throw new IllegalArgument(Throwables.getStackTraceAsString(e));
}
}
@@ -1360,7 +1361,7 @@ public class ThriftServerRunner implements Runnable {
getBytes(row), family, qualifier, amount);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1396,7 +1397,7 @@ public class ThriftServerRunner implements Runnable {
}
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
return ThriftUtilities.rowResultFromHBase(results, resultScannerWrapper.isColumnSorted());
}
@@ -1450,7 +1451,7 @@ public class ThriftServerRunner implements Runnable {
return addScanner(table.getScanner(scan), tScan.sortColumns);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1475,7 +1476,7 @@ public class ThriftServerRunner implements Runnable {
return addScanner(table.getScanner(scan), false);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1501,7 +1502,7 @@ public class ThriftServerRunner implements Runnable {
return addScanner(table.getScanner(scan), false);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1531,7 +1532,7 @@ public class ThriftServerRunner implements Runnable {
return addScanner(table.getScanner(scan), false);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1557,7 +1558,7 @@ public class ThriftServerRunner implements Runnable {
return addScanner(table.getScanner(scan), false);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1585,7 +1586,7 @@ public class ThriftServerRunner implements Runnable {
return addScanner(table.getScanner(scan), false);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1606,7 +1607,7 @@ public class ThriftServerRunner implements Runnable {
return columns;
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1619,7 +1620,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.cellFromHBase(result.rawCells());
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1658,7 +1659,7 @@ public class ThriftServerRunner implements Runnable {
return region;
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1696,7 +1697,7 @@ public class ThriftServerRunner implements Runnable {
table.increment(inc);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1724,7 +1725,7 @@ public class ThriftServerRunner implements Runnable {
return ThriftUtilities.cellFromHBase(result.rawCells());
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
}
}
@@ -1745,7 +1746,7 @@ public class ThriftServerRunner implements Runnable {
put.setDurability(mput.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
} catch (IllegalArgumentException e) {
LOG.warn(e.getMessage(), e);
- throw new IllegalArgument(e.getMessage());
+ throw new IllegalArgument(Throwables.getStackTraceAsString(e));
}
Table table = null;
@@ -1756,10 +1757,10 @@ public class ThriftServerRunner implements Runnable {
value != null ? getBytes(value) : HConstants.EMPTY_BYTE_ARRAY, put);
} catch (IOException e) {
LOG.warn(e.getMessage(), e);
- throw new IOError(e.getMessage());
+ throw new IOError(Throwables.getStackTraceAsString(e));
} catch (IllegalArgumentException e) {
LOG.warn(e.getMessage(), e);
- throw new IllegalArgument(e.getMessage());
+ throw new IllegalArgument(Throwables.getStackTraceAsString(e));
}
}
}
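The commit is one mechanical change repeated at every Thrift exception boundary: instead of forwarding only e.getMessage(), which is often null and never carries the cause chain, the full stack trace string is sent to the client. Guava's helper in isolation:

import com.google.common.base.Throwables;
import java.io.IOException;

public class StackTraceStringExample {
  public static void main(String[] args) {
    IOException root = new IOException("region not online");
    RuntimeException wrapped = new RuntimeException("scan failed", root);

    // getMessage() drops the cause entirely:
    System.out.println(wrapped.getMessage()); // "scan failed"

    // getStackTraceAsString() carries the whole chain, including the
    // "Caused by: java.io.IOException: region not online" frames.
    String full = Throwables.getStackTraceAsString(wrapped);
    System.out.println(full.contains("region not online")); // true
  }
}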
[46/50] [abbrv] hbase git commit: Merge branch 'apache/master'
(4/16/15) into hbase-11339
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java
index e4cad6f,0000000..ba0b620
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java
@@@ -1,827 -1,0 +1,822 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.filecompactions;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.Delete;
- import org.apache.hadoop.hbase.client.Durability;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestMobFileCompactor {
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private Configuration conf = null;
+ private String tableNameAsString;
+ private TableName tableName;
- private HTable hTable;
++ private static Connection conn;
++ private BufferedMutator bufMut;
++ private Table hTable;
+ private Admin admin;
+ private HTableDescriptor desc;
+ private HColumnDescriptor hcd1;
+ private HColumnDescriptor hcd2;
+ private FileSystem fs;
- private final String family1 = "family1";
- private final String family2 = "family2";
- private final String qf1 = "qualifier1";
- private final String qf2 = "qualifier2";
- private byte[] KEYS = Bytes.toBytes("012");
- private int regionNum = KEYS.length;
- private int delRowNum = 1;
- private int delCellNum = 6;
- private int cellNumPerRow = 3;
- private int rowNumPerFile = 2;
++ private static final String family1 = "family1";
++ private static final String family2 = "family2";
++ private static final String qf1 = "qualifier1";
++ private static final String qf2 = "qualifier2";
++ private static byte[] KEYS = Bytes.toBytes("012");
++ private static int regionNum = KEYS.length;
++ private static int delRowNum = 1;
++ private static int delCellNum = 6;
++ private static int cellNumPerRow = 3;
++ private static int rowNumPerFile = 2;
+ private static ExecutorService pool;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ TEST_UTIL.getConfiguration().setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD, 5000);
+ TEST_UTIL.startMiniCluster(1);
+ pool = createThreadPool(TEST_UTIL.getConfiguration());
++ conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), pool);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ pool.shutdown();
++ conn.close();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ fs = TEST_UTIL.getTestFileSystem();
+ conf = TEST_UTIL.getConfiguration();
+ long tid = System.currentTimeMillis();
+ tableNameAsString = "testMob" + tid;
+ tableName = TableName.valueOf(tableNameAsString);
+ hcd1 = new HColumnDescriptor(family1);
+ hcd1.setMobEnabled(true);
+ hcd1.setMobThreshold(0L);
+ hcd1.setMaxVersions(4);
+ hcd2 = new HColumnDescriptor(family2);
+ hcd2.setMobEnabled(true);
+ hcd2.setMobThreshold(0L);
+ hcd2.setMaxVersions(4);
+ desc = new HTableDescriptor(tableName);
+ desc.addFamily(hcd1);
+ desc.addFamily(hcd2);
+ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc, getSplitKeys());
- hTable = new HTable(conf, tableNameAsString);
- hTable.setAutoFlush(false, false);
++ hTable = conn.getTable(tableName);
++ bufMut = conn.getBufferedMutator(tableName);
+ }
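The setUp change just above trades the deprecated HTable plus setAutoFlush(false, false) pattern for an explicit BufferedMutator from the shared Connection: writes are buffered client-side and shipped in batches. A minimal sketch of the new-style API, with placeholder table, family, and qualifier names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("testMob"))) {
      for (int i = 0; i < 100; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("family1"), Bytes.toBytes("qualifier1"),
            Bytes.toBytes("value-" + i));
        mutator.mutate(put); // buffered client-side, not sent per call
      }
      mutator.flush(); // push any remaining buffered mutations
    } // close() also flushes
  }
}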
+
+ @After
+ public void tearDown() throws Exception {
+ admin.disableTable(tableName);
+ admin.deleteTable(tableName);
+ admin.close();
+ hTable.close();
+ fs.delete(TEST_UTIL.getDataTestDir(), true);
+ }
+
+ @Test
+ public void testCompactionWithoutDelFilesWithNamespace() throws Exception {
+ resetConf();
+ // create a table with namespace
+ NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create("ns").build();
+ String tableNameAsString = "ns:testCompactionWithoutDelFilesWithNamespace";
+ admin.createNamespace(namespaceDescriptor);
+ TableName tableName = TableName.valueOf(tableNameAsString);
+ HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
+ hcd1.setMobEnabled(true);
+ hcd1.setMobThreshold(0L);
+ hcd1.setMaxVersions(4);
+ HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
+ hcd2.setMobEnabled(true);
+ hcd2.setMobThreshold(0L);
+ hcd2.setMaxVersions(4);
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ desc.addFamily(hcd1);
+ desc.addFamily(hcd2);
+ admin.createTable(desc, getSplitKeys());
- HTable table = new HTable(conf, tableName);
- table.setAutoFlush(false, false);
++ BufferedMutator bufMut = conn.getBufferedMutator(tableName);
++ Table table = conn.getTable(tableName);
+
+ int count = 4;
+ // generate mob files
- loadData(admin, table, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count * rowNumPerFile;
+
+ assertEquals("Before compaction: mob rows count", regionNum * rowNumPerRegion,
+ countMobRows(table));
+ assertEquals("Before compaction: mob file count", regionNum * count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: del file count", 0, countFiles(tableName, false, family1));
+
+ MobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs, tableName, hcd1, pool);
+ compactor.compact();
+
+ assertEquals("After compaction: mob rows count", regionNum * rowNumPerRegion,
+ countMobRows(table));
+ assertEquals("After compaction: mob file count", regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: del file count", 0, countFiles(tableName, false, family1));
+
+ table.close();
+ admin.disableTable(tableName);
+ admin.deleteTable(tableName);
+ admin.deleteNamespace("ns");
+ }
+
+ @Test
+ public void testCompactionWithoutDelFiles() throws Exception {
+ resetConf();
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ assertEquals("Before compaction: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob file count", regionNum * count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: del file count", 0, countFiles(tableName, false, family1));
+
+ MobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs, tableName, hcd1, pool);
+ compactor.compact();
+
+ assertEquals("After compaction: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("After compaction: mob file count", regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: del file count", 0, countFiles(tableName, false, family1));
+ }
+
+ @Test
+ public void testCompactionWithDelFiles() throws Exception {
+ resetConf();
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("Before deleting: mob cells count", regionNum*cellNumPerRow*rowNumPerRegion,
+ countMobCells(hTable));
+ assertEquals("Before deleting: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before deleting: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+
+ createDelFile();
+
+ assertEquals("Before compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("Before compaction: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: family2 file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("Before compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("Before compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+
+ // do the mob file compaction
+ MobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs, tableName, hcd1, pool);
+ compactor.compact();
+
+ assertEquals("After compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("After compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("After compaction: family1 mob file count", regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After compaction: family1 del file count", 0,
+ countFiles(tableName, false, family1));
+ assertEquals("After compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+ assertRefFileNameEqual(family1);
+ }
+
+ @Test
+ public void testCompactionWithDelFilesAndNotMergeAllFiles() throws Exception {
+ resetConf();
+ int mergeSize = 5000;
+ // change the mob compaction merge size
+ conf.setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
+
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("Before deleting: mob cells count", regionNum*cellNumPerRow*rowNumPerRegion,
+ countMobCells(hTable));
+ assertEquals("Before deleting: mob file count", regionNum * count,
+ countFiles(tableName, true, family1));
+
+ int largeFilesCount = countLargeFiles(mergeSize, family1);
+ createDelFile();
+
+ assertEquals("Before compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("Before compaction: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("Before compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("Before compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+
+ // do the mob file compaction
+ MobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs, tableName, hcd1, pool);
+ compactor.compact();
+
+ assertEquals("After compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("After compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ // After the compaction, the files smaller than the mob compaction merge size
+ // are merged into one file
+ assertEquals("After compaction: family1 mob file count", largeFilesCount + regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("After compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+ }
+
+ @Test
+ public void testCompactionWithDelFilesAndWithSmallCompactionBatchSize() throws Exception {
+ resetConf();
+ int batchSize = 2;
+ conf.setInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE, batchSize);
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ assertEquals("Before deleting: mob row count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("Before deleting: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before deleting: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+
+ createDelFile();
+
+ assertEquals("Before compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("Before compaction: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("Before compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("Before compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+
+ // do the mob file compaction
+ MobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs, tableName, hcd1, pool);
+ compactor.compact();
+
+ assertEquals("After compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("After compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("After compaction: family1 mob file count", regionNum*(count/batchSize),
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After compaction: family1 del file count", 0,
+ countFiles(tableName, false, family1));
+ assertEquals("After compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+ }
+
+ @Test
+ public void testCompactionWithHFileLink() throws IOException, InterruptedException {
+ resetConf();
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ long tid = System.currentTimeMillis();
+ byte[] snapshotName1 = Bytes.toBytes("snaptb-" + tid);
+ // take a snapshot
+ admin.snapshot(snapshotName1, tableName);
+
+ createDelFile();
+
+ assertEquals("Before compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("Before compaction: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("Before compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("Before compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+
+ // do the mob file compaction
+ MobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs, tableName, hcd1, pool);
+ compactor.compact();
+
+ assertEquals("After first compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("After first compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("After first compaction: family1 mob file count", regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After first compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After first compaction: family1 del file count", 0,
+ countFiles(tableName, false, family1));
+ assertEquals("After first compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+ assertEquals("After first compaction: family1 hfilelink count", 0, countHFileLinks(family1));
+ assertEquals("After first compaction: family2 hfilelink count", 0, countHFileLinks(family2));
+
+ admin.disableTable(tableName);
+ // Restore from snapshot, the hfilelink will exist in mob dir
+ admin.restoreSnapshot(snapshotName1);
+ admin.enableTable(tableName);
+
+ assertEquals("After restoring snapshot: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("After restoring snapshot: mob cells count",
+ regionNum*cellNumPerRow*rowNumPerRegion, countMobCells(hTable));
+ assertEquals("After restoring snapshot: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("After restoring snapshot: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After restoring snapshot: family1 del file count", 0,
+ countFiles(tableName, false, family1));
+ assertEquals("After restoring snapshot: family2 del file count", 0,
+ countFiles(tableName, false, family2));
+ assertEquals("After restoring snapshot: family1 hfilelink count", regionNum*count,
+ countHFileLinks(family1));
+ assertEquals("After restoring snapshot: family2 hfilelink count", 0,
+ countHFileLinks(family2));
+
+ compactor.compact();
+
+ assertEquals("After second compaction: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("After second compaction: mob cells count",
+ regionNum*cellNumPerRow*rowNumPerRegion, countMobCells(hTable));
+ assertEquals("After second compaction: family1 mob file count", regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After second compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After second compaction: family1 del file count", 0,
+ countFiles(tableName, false, family1));
+ assertEquals("After second compaction: family2 del file count", 0,
+ countFiles(tableName, false, family2));
+ assertEquals("After second compaction: family1 hfilelink count", 0, countHFileLinks(family1));
+ assertEquals("After second compaction: family2 hfilelink count", 0, countHFileLinks(family2));
+ assertRefFileNameEqual(family1);
+ }
+
+ @Test
+ public void testCompactionFromAdmin() throws Exception {
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("Before deleting: mob cells count", regionNum*cellNumPerRow*rowNumPerRegion,
+ countMobCells(hTable));
+ assertEquals("Before deleting: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before deleting: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+
+ createDelFile();
+
+ assertEquals("Before compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("Before compaction: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: family2 file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("Before compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("Before compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+
+ int largeFilesCount = countLargeFiles(5000, family1);
+ // do the mob file compaction
+ admin.compactMob(tableName, hcd1.getName());
+
+ waitUntilCompactionFinished(tableName);
+ assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum),
+ countMobRows(hTable));
+ assertEquals("After compaction: mob cells count", regionNum
+ * (cellNumPerRow * rowNumPerRegion - delCellNum), countMobCells(hTable));
+ assertEquals("After compaction: family1 mob file count", regionNum + largeFilesCount,
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: family2 mob file count", regionNum * count,
+ countFiles(tableName, true, family2));
+ assertEquals("After compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("After compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+ assertRefFileNameEqual(family1);
+ }
+
+ @Test
+ public void testMajorCompactionFromAdmin() throws Exception {
+ int count = 4;
+ // generate mob files
- loadData(admin, hTable, tableName, count, rowNumPerFile);
++ loadData(admin, bufMut, tableName, count, rowNumPerFile);
+ int rowNumPerRegion = count*rowNumPerFile;
+
+ assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion,
+ countMobRows(hTable));
+ assertEquals("Before deleting: mob cells count", regionNum*cellNumPerRow*rowNumPerRegion,
+ countMobCells(hTable));
+ assertEquals("Before deleting: mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+
+ createDelFile();
+
+ assertEquals("Before compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("Before compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("Before compaction: family1 mob file count", regionNum*count,
+ countFiles(tableName, true, family1));
+ assertEquals("Before compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("Before compaction: family1 del file count", regionNum,
+ countFiles(tableName, false, family1));
+ assertEquals("Before compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+
+ // do the major mob file compaction; it forces all files to be compacted
+ admin.majorCompactMob(tableName, hcd1.getName());
+
+ waitUntilCompactionFinished(tableName);
+ assertEquals("After compaction: mob rows count", regionNum*(rowNumPerRegion-delRowNum),
+ countMobRows(hTable));
+ assertEquals("After compaction: mob cells count",
+ regionNum*(cellNumPerRow*rowNumPerRegion-delCellNum), countMobCells(hTable));
+ assertEquals("After compaction: family1 mob file count", regionNum,
+ countFiles(tableName, true, family1));
+ assertEquals("After compaction: family2 mob file count", regionNum*count,
+ countFiles(tableName, true, family2));
+ assertEquals("After compaction: family1 del file count", 0,
+ countFiles(tableName, false, family1));
+ assertEquals("After compaction: family2 del file count", regionNum,
+ countFiles(tableName, false, family2));
+ }
+
+ private void waitUntilCompactionFinished(TableName tableName) throws IOException,
+ InterruptedException {
+ long finished = EnvironmentEdgeManager.currentTime() + 60000;
+ CompactionState state = admin.getMobCompactionState(tableName);
+ while (EnvironmentEdgeManager.currentTime() < finished) {
+ if (state == CompactionState.NONE) {
+ break;
+ }
+ state = admin.getMobCompactionState(tableName);
+ Thread.sleep(10);
+ }
+ assertEquals(CompactionState.NONE, state);
+ }
+
+ /**
+ * Gets the number of rows in the given table.
+ * @param table the table to scan
+ * @return the number of rows
+ */
- private int countMobRows(final HTable table) throws IOException {
++ private int countMobRows(final Table table) throws IOException {
+ Scan scan = new Scan();
+ // Do not retrieve the mob data when scanning
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ count++;
+ }
+ results.close();
+ return count;
+ }
+
+ /**
+ * Gets the number of cells in the given table.
+ * @param table the table to scan
+ * @return the number of cells
+ */
- private int countMobCells(final HTable table) throws IOException {
++ private int countMobCells(final Table table) throws IOException {
+ Scan scan = new Scan();
+ // Do not retrieve the mob data when scanning
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ for (Cell cell : res.listCells()) {
+ count++;
+ }
+ }
+ results.close();
+ return count;
+ }
+
+ /**
+ * Gets the number of files in the mob path.
+ * @param tableName the table name
+ * @param isMobFile true to count the mob files, false to count the del files
+ * @param familyName the family name
+ * @return the number of files
+ */
+ private int countFiles(TableName tableName, boolean isMobFile, String familyName)
+ throws IOException {
+ Path mobDirPath = MobUtils.getMobFamilyPath(
+ MobUtils.getMobRegionPath(conf, tableName), familyName);
+ int count = 0;
+ if (fs.exists(mobDirPath)) {
+ FileStatus[] files = fs.listStatus(mobDirPath);
+ for (FileStatus file : files) {
+ if (isMobFile) {
+ if (!StoreFileInfo.isDelFile(file.getPath())) {
+ count++;
+ }
+ } else {
+ if (StoreFileInfo.isDelFile(file.getPath())) {
+ count++;
+ }
+ }
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Gets the number of HFileLinks in the mob path.
+ * @param familyName the family name
+ * @return the number of HFileLinks
+ */
+ private int countHFileLinks(String familyName) throws IOException {
+ Path mobDirPath = MobUtils.getMobFamilyPath(
+ MobUtils.getMobRegionPath(conf, tableName), familyName);
+ int count = 0;
+ if (fs.exists(mobDirPath)) {
+ FileStatus[] files = fs.listStatus(mobDirPath);
+ for (FileStatus file : files) {
+ if (HFileLink.isHFileLink(file.getPath())) {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Gets the number of mob files larger than the given size.
+ * @param size the size threshold in bytes
+ * @param familyName the family name
+ * @return the number of files larger than the size
+ */
+ private int countLargeFiles(int size, String familyName) throws IOException {
+ Path mobDirPath = MobUtils.getMobFamilyPath(
+ MobUtils.getMobRegionPath(conf, tableName), familyName);
+ int count = 0;
+ if (fs.exists(mobDirPath)) {
+ FileStatus[] files = fs.listStatus(mobDirPath);
+ for (FileStatus file : files) {
+ // ignore the del files in the mob path
+ if ((!StoreFileInfo.isDelFile(file.getPath()))
+ && (file.getLen() > size)) {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
+
+ /**
+ * Loads data into the table.
- * @param count the mob file number
+ */
- private void loadData(Admin admin, HTable table, TableName tableName, int fileNum,
++ private void loadData(Admin admin, BufferedMutator table, TableName tableName, int fileNum,
+ int rowNumPerFile) throws IOException, InterruptedException {
+ if (fileNum <= 0) {
+ throw new IllegalArgumentException("fileNum must be positive");
+ }
+ for (byte k0 : KEYS) {
+ byte[] k = new byte[] { k0 };
+ for (int i = 0; i < fileNum * rowNumPerFile; i++) {
+ byte[] key = Bytes.add(k, Bytes.toBytes(i));
+ byte[] mobVal = makeDummyData(10 * (i + 1));
+ Put put = new Put(key);
+ put.setDurability(Durability.SKIP_WAL);
- put.add(Bytes.toBytes(family1), Bytes.toBytes(qf1), mobVal);
- put.add(Bytes.toBytes(family1), Bytes.toBytes(qf2), mobVal);
- put.add(Bytes.toBytes(family2), Bytes.toBytes(qf1), mobVal);
- table.put(put);
++ put.addColumn(Bytes.toBytes(family1), Bytes.toBytes(qf1), mobVal);
++ put.addColumn(Bytes.toBytes(family1), Bytes.toBytes(qf2), mobVal);
++ put.addColumn(Bytes.toBytes(family2), Bytes.toBytes(qf1), mobVal);
++ table.mutate(put);
+ if ((i + 1) % rowNumPerFile == 0) {
- table.flushCommits();
++ table.flush();
+ admin.flush(tableName);
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes a family, a whole row and a single cell to create the del files.
+ */
+ private void createDelFile() throws IOException, InterruptedException {
+ for (byte k0 : KEYS) {
+ byte[] k = new byte[] { k0 };
+ // delete a family
+ byte[] key1 = Bytes.add(k, Bytes.toBytes(0));
+ Delete delete1 = new Delete(key1);
- delete1.deleteFamily(Bytes.toBytes(family1));
++ delete1.addFamily(Bytes.toBytes(family1));
+ hTable.delete(delete1);
+ // delete one row
+ byte[] key2 = Bytes.add(k, Bytes.toBytes(2));
+ Delete delete2 = new Delete(key2);
+ hTable.delete(delete2);
+ // delete one cell
+ byte[] key3 = Bytes.add(k, Bytes.toBytes(4));
+ Delete delete3 = new Delete(key3);
- delete3.deleteColumn(Bytes.toBytes(family1), Bytes.toBytes(qf1));
++ delete3.addColumn(Bytes.toBytes(family1), Bytes.toBytes(qf1));
+ hTable.delete(delete3);
- hTable.flushCommits();
+ admin.flush(tableName);
+ List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(
+ Bytes.toBytes(tableNameAsString));
+ for (HRegion region : regions) {
+ region.waitForFlushesAndCompactions();
- region.compactStores(true);
++ region.compact(true);
+ }
+ }
+ }
+
+ /**
+ * Creates the dummy data with a specific size.
- * @param the size of data
++ * @param size the size of value
+ * @return the dummy data
+ */
+ private byte[] makeDummyData(int size) {
+ byte[] dummyData = new byte[size];
+ new Random().nextBytes(dummyData);
+ return dummyData;
+ }
+
+ /**
+ * Gets the split keys
+ */
+ private byte[][] getSplitKeys() {
+ byte[][] splitKeys = new byte[KEYS.length - 1][];
+ for (int i = 0; i < splitKeys.length; ++i) {
+ splitKeys[i] = new byte[] { KEYS[i + 1] };
+ }
+ return splitKeys;
+ }
+
+ private static ExecutorService createThreadPool(Configuration conf) {
+ int maxThreads = 10;
+ long keepAliveTime = 60;
+ final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
+ ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads,
+ keepAliveTime, TimeUnit.SECONDS, queue,
+ Threads.newDaemonThreadFactory("MobFileCompactionChore"),
+ new RejectedExecutionHandler() {
+ @Override
+ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
+ try {
+ // waiting for a thread to pick up instead of throwing exceptions.
+ queue.put(r);
+ } catch (InterruptedException e) {
+ throw new RejectedExecutionException(e);
+ }
+ }
+ });
+ pool.allowCoreThreadTimeOut(true);
+ return pool;
+ }
+
+ private void assertRefFileNameEqual(String familyName) throws IOException {
+ Scan scan = new Scan();
+ scan.addFamily(Bytes.toBytes(familyName));
+ // Do not retrieve the mob data when scanning
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ ResultScanner results = hTable.getScanner(scan);
+ Path mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(),
+ tableName), familyName);
+ List<Path> actualFilePaths = new ArrayList<>();
+ List<Path> expectFilePaths = new ArrayList<>();
+ for (Result res : results) {
+ for (Cell cell : res.listCells()) {
+ byte[] referenceValue = CellUtil.cloneValue(cell);
+ String fileName = Bytes.toString(referenceValue, Bytes.SIZEOF_INT,
+ referenceValue.length - Bytes.SIZEOF_INT);
+ Path targetPath = new Path(mobFamilyPath, fileName);
+ if(!actualFilePaths.contains(targetPath)) {
+ actualFilePaths.add(targetPath);
+ }
+ }
+ }
+ results.close();
+ if (fs.exists(mobFamilyPath)) {
+ FileStatus[] files = fs.listStatus(mobFamilyPath);
+ for (FileStatus file : files) {
+ if (!StoreFileInfo.isDelFile(file.getPath())) {
+ expectFilePaths.add(file.getPath());
+ }
+ }
+ }
+ Collections.sort(actualFilePaths);
+ Collections.sort(expectFilePaths);
+ assertEquals(expectFilePaths, actualFilePaths);
+ }
+
+ /**
+ * Resets the configuration.
+ */
+ private void resetConf() {
+ conf.setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD,
+ MobConstants.DEFAULT_MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD);
+ conf.setInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE,
+ MobConstants.DEFAULT_MOB_FILE_COMPACTION_BATCH_SIZE);
+ }
+}
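The loadData and createDelFile changes in this file track the HBase 1.0 client API: HTable.put/flushCommits becomes BufferedMutator.mutate/flush, and the deprecated Put.add, Delete.deleteFamily and Delete.deleteColumn calls become addColumn/addFamily. A minimal standalone sketch of the new write path, assuming a hypothetical table "demo" with family "f" (not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedMutatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The Connection owns the cluster resources; the BufferedMutator buffers
        // mutations client-side and ships them to the servers in batches.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("demo"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          mutator.mutate(put); // queued locally, not yet sent
          mutator.flush();     // push everything buffered so far
        }
      }
    }

Note that flush() here only drains the client-side buffer; the tests above still call admin.flush(tableName) afterwards to force a server-side memstore flush.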
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java
index 3c73d52,0000000..ed3853e
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java
@@@ -1,446 -1,0 +1,441 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.filecompactions;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
++import org.apache.hadoop.hbase.regionserver.*;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobFileName;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.filecompactions.MobFileCompactionRequest.CompactionType;
+import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartition;
- import org.apache.hadoop.hbase.regionserver.BloomType;
- import org.apache.hadoop.hbase.regionserver.HStore;
- import org.apache.hadoop.hbase.regionserver.ScanInfo;
- import org.apache.hadoop.hbase.regionserver.ScanType;
- import org.apache.hadoop.hbase.regionserver.StoreFile;
- import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
- import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestPartitionedMobFileCompactor {
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static String family = "family";
+ private final static String qf = "qf";
+ private HColumnDescriptor hcd = new HColumnDescriptor(family);
+ private Configuration conf = TEST_UTIL.getConfiguration();
+ private CacheConfig cacheConf = new CacheConfig(conf);
+ private FileSystem fs;
+ private List<FileStatus> mobFiles = new ArrayList<>();
+ private List<FileStatus> delFiles = new ArrayList<>();
+ private List<FileStatus> allFiles = new ArrayList<>();
+ private Path basePath;
+ private String mobSuffix;
+ private String delSuffix;
+ private static ExecutorService pool;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+ TEST_UTIL.startMiniCluster(1);
- pool = createThreadPool(TEST_UTIL.getConfiguration());
++ pool = createThreadPool();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ pool.shutdown();
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private void init(String tableName) throws Exception {
+ fs = FileSystem.get(conf);
+ Path testDir = FSUtils.getRootDir(conf);
+ Path mobTestDir = new Path(testDir, MobConstants.MOB_DIR_NAME);
+ basePath = new Path(new Path(mobTestDir, tableName), family);
+ mobSuffix = UUID.randomUUID().toString().replaceAll("-", "");
+ delSuffix = UUID.randomUUID().toString().replaceAll("-", "") + "_del";
+ }
+
+ @Test
+ public void testCompactionSelectWithAllFiles() throws Exception {
+ resetConf();
+ String tableName = "testCompactionSelectWithAllFiles";
+ init(tableName);
+ int count = 10;
+ // create 10 mob files.
+ createStoreFiles(basePath, family, qf, count, Type.Put);
+ // create 10 del files
+ createStoreFiles(basePath, family, qf, count, Type.Delete);
+ listFiles();
+ long mergeSize = MobConstants.DEFAULT_MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD;
+ List<String> expectedStartKeys = new ArrayList<>();
+ for(FileStatus file : mobFiles) {
+ if(file.getLen() < mergeSize) {
+ String fileName = file.getPath().getName();
+ String startKey = fileName.substring(0, 32);
+ expectedStartKeys.add(startKey);
+ }
+ }
+ testSelectFiles(tableName, CompactionType.ALL_FILES, false, expectedStartKeys);
+ }
+
+ @Test
+ public void testCompactionSelectWithPartFiles() throws Exception {
+ resetConf();
+ String tableName = "testCompactionSelectWithPartFiles";
+ init(tableName);
+ int count = 10;
+ // create 10 mob files.
+ createStoreFiles(basePath, family, qf, count, Type.Put);
+ // create 10 del files
+ createStoreFiles(basePath, family, qf, count, Type.Delete);
+ listFiles();
+ long mergeSize = 4000;
+ List<String> expectedStartKeys = new ArrayList<>();
+ for(FileStatus file : mobFiles) {
+ if (file.getLen() < mergeSize) {
+ String fileName = file.getPath().getName();
+ String startKey = fileName.substring(0, 32);
+ expectedStartKeys.add(startKey);
+ }
+ }
+ // set the mob file compaction mergeable threshold
+ conf.setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
+ testSelectFiles(tableName, CompactionType.PART_FILES, false, expectedStartKeys);
+ }
+
+ @Test
+ public void testCompactionSelectWithForceAllFiles() throws Exception {
+ resetConf();
+ String tableName = "testCompactionSelectWithForceAllFiles";
+ init(tableName);
+ int count = 10;
+ // create 10 mob files.
+ createStoreFiles(basePath, family, qf, count, Type.Put);
+ // create 10 del files
+ createStoreFiles(basePath, family, qf, count, Type.Delete);
+ listFiles();
+ long mergeSize = 4000;
+ List<String> expectedStartKeys = new ArrayList<>();
+ for(FileStatus file : mobFiles) {
+ String fileName = file.getPath().getName();
+ String startKey = fileName.substring(0, 32);
+ expectedStartKeys.add(startKey);
+ }
+ // set the mob file compaction mergeable threshold
+ conf.setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
+ testSelectFiles(tableName, CompactionType.ALL_FILES, true, expectedStartKeys);
+ }
+
+ @Test
+ public void testCompactDelFilesWithDefaultBatchSize() throws Exception {
+ resetConf();
+ String tableName = "testCompactDelFilesWithDefaultBatchSize";
+ init(tableName);
+ // create 20 mob files.
+ createStoreFiles(basePath, family, qf, 20, Type.Put);
+ // create 13 del files
+ createStoreFiles(basePath, family, qf, 13, Type.Delete);
+ listFiles();
+ testCompactDelFiles(tableName, 1, 13, false);
+ }
+
+ @Test
+ public void testCompactDelFilesWithSmallBatchSize() throws Exception {
+ resetConf();
+ String tableName = "testCompactDelFilesWithSmallBatchSize";
+ init(tableName);
+ // create 20 mob files.
+ createStoreFiles(basePath, family, qf, 20, Type.Put);
+ // create 13 del files
+ createStoreFiles(basePath, family, qf, 13, Type.Delete);
+ listFiles();
+
+ // set the mob file compaction batch size
+ conf.setInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE, 4);
+ testCompactDelFiles(tableName, 1, 13, false);
+ }
+
+ @Test
+ public void testCompactDelFilesChangeMaxDelFileCount() throws Exception {
+ resetConf();
+ String tableName = "testCompactDelFilesWithSmallBatchSize";
+ init(tableName);
+ // create 20 mob files.
+ createStoreFiles(basePath, family, qf, 20, Type.Put);
+ // create 13 del files
+ createStoreFiles(basePath, family, qf, 13, Type.Delete);
+ listFiles();
+
+ // set the max del file count
+ conf.setInt(MobConstants.MOB_DELFILE_MAX_COUNT, 5);
+ // set the mob file compaction batch size
+ conf.setInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE, 2);
+ testCompactDelFiles(tableName, 4, 13, false);
+ }
+
+ /**
+ * Tests the selectFiles method.
+ * @param tableName the table name
+ * @param type the expected compaction type
+ * @param isForceAllFiles whether all the mob files are forced to be compacted
+ * @param expected the expected start keys
+ */
+ private void testSelectFiles(String tableName, final CompactionType type,
+ final boolean isForceAllFiles, final List<String> expected) throws IOException {
+ PartitionedMobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs,
+ TableName.valueOf(tableName), hcd, pool) {
+ @Override
+ public List<Path> compact(List<FileStatus> files, boolean isForceAllFiles)
+ throws IOException {
+ if (files == null || files.isEmpty()) {
+ return null;
+ }
+ PartitionedMobFileCompactionRequest request = select(files, isForceAllFiles);
+ // assert the compaction type
+ Assert.assertEquals(type, request.type);
+ // assert get the right partitions
+ compareCompactedPartitions(expected, request.compactionPartitions);
+ // assert get the right del files
+ compareDelFiles(request.delFiles);
+ return null;
+ }
+ };
+ compactor.compact(allFiles, isForceAllFiles);
+ }
+
+ /**
+ * Tests the compactDelFiles method.
+ * @param tableName the table name
+ * @param expectedFileCount the expected file count
+ * @param expectedCellCount the expected cell count
+ * @param isForceAllFiles whether all the mob files are forced to be compacted
+ */
+ private void testCompactDelFiles(String tableName, final int expectedFileCount,
+ final int expectedCellCount, boolean isForceAllFiles) throws IOException {
+ PartitionedMobFileCompactor compactor = new PartitionedMobFileCompactor(conf, fs,
+ TableName.valueOf(tableName), hcd, pool) {
+ @Override
+ protected List<Path> performCompaction(PartitionedMobFileCompactionRequest request)
+ throws IOException {
+ List<Path> delFilePaths = new ArrayList<Path>();
+ for (FileStatus delFile : request.delFiles) {
+ delFilePaths.add(delFile.getPath());
+ }
+ List<Path> newDelPaths = compactDelFiles(request, delFilePaths);
+ // assert the del files are merged.
+ Assert.assertEquals(expectedFileCount, newDelPaths.size());
+ Assert.assertEquals(expectedCellCount, countDelCellsInDelFiles(newDelPaths));
+ return null;
+ }
+ };
+ compactor.compact(allFiles, isForceAllFiles);
+ }
+
+ /**
+ * Lists the files in the base path, splitting them into mob files and del files.
+ */
+ private void listFiles() throws IOException {
+ for (FileStatus file : fs.listStatus(basePath)) {
+ allFiles.add(file);
+ if (file.getPath().getName().endsWith("_del")) {
+ delFiles.add(file);
+ } else {
+ mobFiles.add(file);
+ }
+ }
+ }
+
+ /**
+ * Compares the compacted partitions.
+ * @param expected the expected start keys
+ * @param partitions the collection of CompactionPartitions
+ */
+ private void compareCompactedPartitions(List<String> expected,
+ Collection<CompactionPartition> partitions) {
+ List<String> actualKeys = new ArrayList<>();
+ for (CompactionPartition partition : partitions) {
+ actualKeys.add(partition.getPartitionId().getStartKey());
+ }
+ Collections.sort(expected);
+ Collections.sort(actualKeys);
+ Assert.assertEquals(expected.size(), actualKeys.size());
+ for (int i = 0; i < expected.size(); i++) {
+ Assert.assertEquals(expected.get(i), actualKeys.get(i));
+ }
+ }
+
+ /**
+ * Compares the del files.
+ * @param allDelFiles all the del files
+ */
+ private void compareDelFiles(Collection<FileStatus> allDelFiles) {
+ int i = 0;
+ for (FileStatus file : allDelFiles) {
+ Assert.assertEquals(delFiles.get(i), file);
+ i++;
+ }
+ }
+
+ /**
+ * Creates store files.
+ * @param basePath the path where the files are created
+ * @param family the family name
+ * @param qualifier the column qualifier
+ * @param count the number of store files
+ * @param type the key type
+ */
+ private void createStoreFiles(Path basePath, String family, String qualifier, int count,
+ Type type) throws IOException {
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+ String startKey = "row_";
+ MobFileName mobFileName = null;
+ for (int i = 0; i < count; i++) {
+ byte[] startRow = Bytes.toBytes(startKey + i);
+ if (type.equals(Type.Delete)) {
+ mobFileName = MobFileName.create(startRow, MobUtils.formatDate(
+ new Date()), delSuffix);
+ } else if (type.equals(Type.Put)) {
+ mobFileName = MobFileName.create(startRow, MobUtils.formatDate(
+ new Date()), mobSuffix);
+ }
+ StoreFile.Writer mobFileWriter = new StoreFile.WriterBuilder(conf, cacheConf, fs)
+ .withFileContext(meta).withFilePath(new Path(basePath, mobFileName.getFileName())).build();
+ writeStoreFile(mobFileWriter, startRow, Bytes.toBytes(family), Bytes.toBytes(qualifier),
+ type, (i+1)*1000);
+ }
+ }
+
+ /**
+ * Writes data to store file.
+ * @param writer the store file writer
+ * @param row the row key
+ * @param family the family name
+ * @param qualifier the column qualifier
+ * @param type the key type
+ * @param size the size of value
+ */
+ private static void writeStoreFile(final StoreFile.Writer writer, byte[] row, byte[] family,
+ byte[] qualifier, Type type, int size) throws IOException {
+ long now = System.currentTimeMillis();
+ try {
+ byte[] dummyData = new byte[size];
+ new Random().nextBytes(dummyData);
+ writer.append(new KeyValue(row, family, qualifier, now, type, dummyData));
+ } finally {
+ writer.close();
+ }
+ }
+
+ /**
+ * Gets the number of del cells in the del files.
+ * @param paths the del file paths
+ * @return the number of del cells
+ */
+ private int countDelCellsInDelFiles(List<Path> paths) throws IOException {
+ List<StoreFile> sfs = new ArrayList<StoreFile>();
+ int size = 0;
+ for(Path path : paths) {
+ StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
+ sfs.add(sf);
+ }
+ List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true,
+ false, null, HConstants.LATEST_TIMESTAMP);
+ Scan scan = new Scan();
+ scan.setMaxVersions(hcd.getMaxVersions());
+ long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
+ long ttl = HStore.determineTTLFromFamily(hcd);
+ ScanInfo scanInfo = new ScanInfo(hcd, ttl, timeToPurgeDeletes, KeyValue.COMPARATOR);
+ StoreScanner scanner = new StoreScanner(scan, scanInfo, ScanType.COMPACT_RETAIN_DELETES, null,
+ scanners, 0L, HConstants.LATEST_TIMESTAMP);
+ List<Cell> results = new ArrayList<>();
+ boolean hasMore = true;
++
+ while (hasMore) {
+ hasMore = scanner.next(results);
+ size += results.size();
+ results.clear();
+ }
+ scanner.close();
+ return size;
+ }
+
- private static ExecutorService createThreadPool(Configuration conf) {
++ private static ExecutorService createThreadPool() {
+ int maxThreads = 10;
+ long keepAliveTime = 60;
+ final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
+ ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime,
+ TimeUnit.SECONDS, queue, Threads.newDaemonThreadFactory("MobFileCompactionChore"),
+ new RejectedExecutionHandler() {
+ @Override
+ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
+ try {
+ // waiting for a thread to pick up instead of throwing exceptions.
+ queue.put(r);
+ } catch (InterruptedException e) {
+ throw new RejectedExecutionException(e);
+ }
+ }
+ });
+ pool.allowCoreThreadTimeOut(true);
+ return pool;
+ }
+
+ /**
+ * Resets the configuration.
+ */
+ private void resetConf() {
+ conf.setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD,
+ MobConstants.DEFAULT_MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD);
+ conf.setInt(MobConstants.MOB_DELFILE_MAX_COUNT, MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT);
+ conf.setInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE,
+ MobConstants.DEFAULT_MOB_FILE_COMPACTION_BATCH_SIZE);
+ }
+}
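Both test classes build their compaction pool the same way: a ThreadPoolExecutor over a SynchronousQueue whose RejectedExecutionHandler hands the task back with queue.put, so submitters block until a worker frees up instead of seeing a RejectedExecutionException. A standalone sketch of the pattern (class and task names are illustrative, not from the patch):

    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.RejectedExecutionHandler;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BlockingPoolSketch {
      static ThreadPoolExecutor createPool(int maxThreads) {
        final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS,
            queue, new RejectedExecutionHandler() {
              @Override
              public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                try {
                  // A SynchronousQueue has no capacity, so execute() is rejected as
                  // soon as all workers are busy; blocking on put() makes the
                  // submitter wait for a free worker instead of failing.
                  queue.put(r);
                } catch (InterruptedException e) {
                  Thread.currentThread().interrupt();
                  throw new RejectedExecutionException(e);
                }
              }
            });
        pool.allowCoreThreadTimeOut(true); // let the pool shrink to zero when idle
        return pool;
      }

      public static void main(String[] args) throws Exception {
        ThreadPoolExecutor pool = createPool(2);
        for (int i = 0; i < 10; i++) {
          final int n = i;
          pool.execute(new Runnable() {
            @Override
            public void run() {
              System.out.println("task " + n + " on " + Thread.currentThread().getName());
            }
          });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
      }
    }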
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java
index 49345e4,0000000..3023849
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java
@@@ -1,168 -1,0 +1,169 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.mapreduce;
+
++import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.serializer.JavaSerialization;
+import org.apache.hadoop.io.serializer.WritableSerialization;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestMobSweepJob {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ TEST_UTIL.getConfiguration().set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
+ JavaSerialization.class.getName() + "," + WritableSerialization.class.getName());
+ TEST_UTIL.startMiniCluster();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ private void writeFileNames(FileSystem fs, Configuration conf, Path path,
+ String[] fileNames) throws IOException {
+ // write the names to a sequence file
+ SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
+ String.class, String.class);
+ try {
+ for (String fileName : fileNames) {
+ writer.append(fileName, MobConstants.EMPTY_STRING);
+ }
+ } finally {
+ IOUtils.closeStream(writer);
+ }
+ }
+
+ @Test
+ public void testSweeperJobWithoutUnusedFile() throws Exception {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Configuration configuration = new Configuration(
+ TEST_UTIL.getConfiguration());
+ Path visitedFileNamesPath = new Path(MobUtils.getMobHome(configuration),
+ "/hbase/mobcompaction/SweepJob/working/names/0/visited");
+ Path allFileNamesPath = new Path(MobUtils.getMobHome(configuration),
+ "/hbase/mobcompaction/SweepJob/working/names/0/all");
+ configuration.set(SweepJob.WORKING_VISITED_DIR_KEY,
+ visitedFileNamesPath.toString());
+ configuration.set(SweepJob.WORKING_ALLNAMES_FILE_KEY,
+ allFileNamesPath.toString());
+
+ writeFileNames(fs, configuration, allFileNamesPath, new String[] { "1",
+ "2", "3", "4", "5", "6"});
+
+ Path r0 = new Path(vistiedFileNamesPath, "r0");
+ writeFileNames(fs, configuration, r0, new String[] { "1",
+ "2", "3"});
+ Path r1 = new Path(vistiedFileNamesPath, "r1");
+ writeFileNames(fs, configuration, r1, new String[] { "1", "4", "5"});
+ Path r2 = new Path(vistiedFileNamesPath, "r2");
+ writeFileNames(fs, configuration, r2, new String[] { "2", "3", "6"});
+
+ SweepJob sweepJob = new SweepJob(configuration, fs);
+ List<String> toBeArchived = sweepJob.getUnusedFiles(configuration);
+
+ assertEquals(0, toBeArchived.size());
+ }
+
+ @Test
+ public void testSweeperJobWithUnusedFile() throws Exception {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Configuration configuration = new Configuration(
+ TEST_UTIL.getConfiguration());
+ Path visitedFileNamesPath = new Path(MobUtils.getMobHome(configuration),
+ "/hbase/mobcompaction/SweepJob/working/names/1/visited");
+ Path allFileNamesPath = new Path(MobUtils.getMobHome(configuration),
+ "/hbase/mobcompaction/SweepJob/working/names/1/all");
+ configuration.set(SweepJob.WORKING_VISITED_DIR_KEY,
+ vistiedFileNamesPath.toString());
+ configuration.set(SweepJob.WORKING_ALLNAMES_FILE_KEY,
+ allFileNamesPath.toString());
+
+ writeFileNames(fs, configuration, allFileNamesPath, new String[] { "1",
+ "2", "3", "4", "5", "6"});
+
+ Path r0 = new Path(vistiedFileNamesPath, "r0");
+ writeFileNames(fs, configuration, r0, new String[] { "1",
+ "2", "3"});
+ Path r1 = new Path(vistiedFileNamesPath, "r1");
+ writeFileNames(fs, configuration, r1, new String[] { "1", "5"});
+ Path r2 = new Path(vistiedFileNamesPath, "r2");
+ writeFileNames(fs, configuration, r2, new String[] { "2", "3"});
+
+ SweepJob sweepJob = new SweepJob(configuration, fs);
+ List<String> toBeArchived = sweepJob.getUnusedFiles(configuration);
+
+ assertEquals(2, toBeArchived.size());
- assertEquals(new String[] { "4", "6" }, toBeArchived.toArray(new String[0]));
++ assertArrayEquals(new String[]{"4", "6"}, toBeArchived.toArray(new String[0]));
+ }
+
+ @Test
+ public void testSweeperJobWithRedundantFile() throws Exception {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Configuration configuration = new Configuration(
+ TEST_UTIL.getConfiguration());
+ Path visitedFileNamesPath = new Path(MobUtils.getMobHome(configuration),
+ "/hbase/mobcompaction/SweepJob/working/names/2/visited");
+ Path allFileNamesPath = new Path(MobUtils.getMobHome(configuration),
+ "/hbase/mobcompaction/SweepJob/working/names/2/all");
+ configuration.set(SweepJob.WORKING_VISITED_DIR_KEY,
+ vistiedFileNamesPath.toString());
+ configuration.set(SweepJob.WORKING_ALLNAMES_FILE_KEY,
+ allFileNamesPath.toString());
+
+ writeFileNames(fs, configuration, allFileNamesPath, new String[] { "1",
+ "2", "3", "4", "5", "6"});
+
+ Path r0 = new Path(vistiedFileNamesPath, "r0");
+ writeFileNames(fs, configuration, r0, new String[] { "1",
+ "2", "3"});
+ Path r1 = new Path(vistiedFileNamesPath, "r1");
+ writeFileNames(fs, configuration, r1, new String[] { "1", "5", "6", "7"});
+ Path r2 = new Path(vistiedFileNamesPath, "r2");
+ writeFileNames(fs, configuration, r2, new String[] { "2", "3", "4"});
+
+ SweepJob sweepJob = new SweepJob(configuration, fs);
+ List<String> toBeArchived = sweepJob.getUnusedFiles(configuration);
+
+ assertEquals(0, toBeArchived.size());
+ }
+}
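TestMobSweepJob round-trips plain String keys through a SequenceFile, which only works because setUpBeforeClass registers JavaSerialization under io.serializations. A minimal sketch of the same write/read cycle against the local filesystem (the path and key values here are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.serializer.JavaSerialization;
    import org.apache.hadoop.io.serializer.WritableSerialization;

    public class SequenceFileNamesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // String is not a Writable, so a Java serializer must be registered.
        conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
            JavaSerialization.class.getName() + "," + WritableSerialization.class.getName());
        FileSystem fs = FileSystem.getLocal(conf);
        Path path = new Path("/tmp/names.seq");

        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
            String.class, String.class);
        try {
          writer.append("file-1", "");
          writer.append("file-2", "");
        } finally {
          IOUtils.closeStream(writer);
        }

        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        try {
          String key = (String) reader.next((String) null);
          while (key != null) {
            System.out.println(key);
            key = (String) reader.next((String) null);
          }
        } finally {
          IOUtils.closeStream(reader);
        }
      }
    }

Without the JavaSerialization entry, createWriter cannot find a serializer for String and fails; the test's own getKeyFromSequenceFile uses the same reader.next((String) null) idiom shown here.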
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java
index 308b50e,0000000..8c24123
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java
@@@ -1,220 -1,0 +1,219 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.mapreduce;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.DummyMobAbortable;
+import org.apache.hadoop.hbase.mob.mapreduce.SweepJob.SweepCounter;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.serializer.JavaSerialization;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.counters.GenericCounter;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Matchers;
+
+@Category(MediumTests.class)
+public class TestMobSweepReducer {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static String tableName = "testSweepReducer";
+ private final static String row = "row";
+ private final static String family = "family";
+ private final static String qf = "qf";
- private static HTable table;
++ private static BufferedMutator table;
+ private static Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+
+ TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @SuppressWarnings("deprecation")
+ @Before
+ public void setUp() throws Exception {
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
+ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), tableName);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getBufferedMutator(TableName.valueOf(tableName));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ admin.disableTable(TableName.valueOf(tableName));
+ admin.deleteTable(TableName.valueOf(tableName));
+ admin.close();
+ }
+
+ private List<String> getKeyFromSequenceFile(FileSystem fs, Path path,
+ Configuration conf) throws Exception {
+ List<String> list = new ArrayList<String>();
+ SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
+
+ String next = (String) reader.next((String) null);
+ while (next != null) {
+ list.add(next);
+ next = (String) reader.next((String) null);
+ }
+ reader.close();
+ return list;
+ }
+
+ @Test
+ public void testRun() throws Exception {
+
+ TableName tn = TableName.valueOf(tableName);
+ byte[] mobValueBytes = new byte[100];
+
+ // get the path where the mob files are stored
+ Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, family);
+
+ Put put = new Put(Bytes.toBytes(row));
- put.add(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes);
++ put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes);
+ Put put2 = new Put(Bytes.toBytes(row + "ignore"));
- put2.add(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes);
- table.put(put);
- table.put(put2);
- table.flushCommits();
++ put2.addColumn(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes);
++ table.mutate(put);
++ table.mutate(put2);
++ table.flush();
+ admin.flush(tn);
+
+ FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ // check that exactly one mob file was generated
+ assertEquals(1, fileStatuses.length);
+
+ String mobFile1 = fileStatuses[0].getPath().getName();
+
+ Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
+ configuration.setFloat(MobConstants.MOB_SWEEP_TOOL_COMPACTION_RATIO, 0.6f);
+ configuration.setStrings(TableInputFormat.INPUT_TABLE, tableName);
+ configuration.setStrings(TableInputFormat.SCAN_COLUMN_FAMILY, family);
+ configuration.setStrings(SweepJob.WORKING_VISITED_DIR_KEY, "jobWorkingNamesDir");
+ configuration.setStrings(SweepJob.WORKING_FILES_DIR_KEY, "compactionFileDir");
+ configuration.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
+ JavaSerialization.class.getName());
+ configuration.set(SweepJob.WORKING_VISITED_DIR_KEY, "compactionVisitedDir");
+ configuration.setLong(MobConstants.MOB_SWEEP_TOOL_COMPACTION_START_DATE,
+ System.currentTimeMillis() + 24 * 3600 * 1000);
+
+ ZooKeeperWatcher zkw = new ZooKeeperWatcher(configuration, "1", new DummyMobAbortable());
+ TableName lockName = MobUtils.getTableLockName(tn);
+ String znode = ZKUtil.joinZNode(zkw.tableLockZNode, lockName.getNameAsString());
+ configuration.set(SweepJob.SWEEP_JOB_ID, "1");
+ configuration.set(SweepJob.SWEEP_JOB_TABLE_NODE, znode);
+ ServerName serverName = SweepJob.getCurrentServerName(configuration);
+ configuration.set(SweepJob.SWEEP_JOB_SERVERNAME, serverName.toString());
+
+ TableLockManager tableLockManager = TableLockManager.createTableLockManager(configuration, zkw,
+ serverName);
+ TableLock lock = tableLockManager.writeLock(lockName, "Run sweep tool");
+ lock.acquire();
+ try {
+ // use the same counter when mocking
+ Counter counter = new GenericCounter();
+ Reducer<Text, KeyValue, Writable, Writable>.Context ctx = mock(Reducer.Context.class);
+ when(ctx.getConfiguration()).thenReturn(configuration);
+ when(ctx.getCounter(Matchers.any(SweepCounter.class))).thenReturn(counter);
+ when(ctx.nextKey()).thenReturn(true).thenReturn(false);
+ when(ctx.getCurrentKey()).thenReturn(new Text(mobFile1));
+
+ byte[] refBytes = Bytes.toBytes(mobFile1);
+ long valueLength = refBytes.length;
+ byte[] newValue = Bytes.add(Bytes.toBytes(valueLength), refBytes);
+ KeyValue kv2 = new KeyValue(Bytes.toBytes(row), Bytes.toBytes(family), Bytes.toBytes(qf), 1,
+ KeyValue.Type.Put, newValue);
+ List<KeyValue> list = new ArrayList<KeyValue>();
+ list.add(kv2);
+
+ when(ctx.getValues()).thenReturn(list);
+
+ SweepReducer reducer = new SweepReducer();
+ reducer.run(ctx);
+ } finally {
+ lock.release();
+ }
+ FileStatus[] fileStatuses2 = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ String mobFile2 = fileStatuses2[0].getPath().getName();
+ // a new mob file is generated, the old one has been archived
+ assertEquals(1, fileStatuses2.length);
+ assertEquals(false, mobFile2.equalsIgnoreCase(mobFile1));
+
+ // verify the sequence files written by the sweep job
+ String workingPath = configuration.get(SweepJob.WORKING_VISITED_DIR_KEY);
+ FileStatus[] statuses = TEST_UTIL.getTestFileSystem().listStatus(new Path(workingPath));
+ Set<String> files = new TreeSet<String>();
+ for (FileStatus st : statuses) {
+ files.addAll(getKeyFromSequenceFile(TEST_UTIL.getTestFileSystem(),
+ st.getPath(), configuration));
+ }
+ assertEquals(1, files.size());
+ assertEquals(true, files.contains(mobFile1));
+ }
+}
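testRun above drives SweepReducer without a real MapReduce job by mocking Reducer.Context: nextKey is stubbed to answer true once and then false, so run() makes exactly one pass over the mocked key and values. A stripped-down sketch of that stubbing style with hypothetical key/value types (it assumes, as the test itself does, a Mockito version able to mock the non-static Context inner class):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Arrays;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class MockedReducerContextSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) throws Exception {
        Reducer<Text, IntWritable, Text, IntWritable>.Context ctx = mock(Reducer.Context.class);
        // One iteration of the reducer's run() loop: first call true, then false.
        when(ctx.nextKey()).thenReturn(true).thenReturn(false);
        when(ctx.getCurrentKey()).thenReturn(new Text("only-key"));
        when(ctx.getValues()).thenReturn(Arrays.asList(new IntWritable(1), new IntWritable(2)));

        // The default Reducer implementation just forwards key/value pairs to
        // ctx.write(), which is a no-op on the mock.
        new Reducer<Text, IntWritable, Text, IntWritable>().run(ctx);
      }
    }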
[37/50] [abbrv] hbase git commit: HBASE-13455 Procedure V2 - master truncate table
Posted by jm...@apache.org.
HBASE-13455 Procedure V2 - master truncate table
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4788c6d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4788c6d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4788c6d1
Branch: refs/heads/hbase-11339
Commit: 4788c6d1a8cd4b6f31416f57e757bcd8738b4772
Parents: d75326a
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Apr 15 09:40:18 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Apr 15 10:35:41 2015 +0100
----------------------------------------------------------------------
.../generated/MasterProcedureProtos.java | 2016 ++++++++++++++++--
.../src/main/protobuf/MasterProcedure.proto | 18 +
.../org/apache/hadoop/hbase/master/HMaster.java | 10 +-
.../master/procedure/DeleteTableProcedure.java | 5 +-
.../procedure/TruncateTableProcedure.java | 291 +++
.../hadoop/hbase/HBaseTestingUtility.java | 12 +
.../MasterProcedureTestingUtility.java | 43 +
.../TestMasterFailoverWithProcedures.java | 62 +
.../procedure/TestTruncateTableProcedure.java | 246 +++
9 files changed, 2496 insertions(+), 207 deletions(-)
----------------------------------------------------------------------
[31/50] [abbrv] hbase git commit: HBASE-13457 SnapshotExistsException doesn't honor the DoNotRetry
Posted by jm...@apache.org.
HBASE-13457 SnapshotExistsException doesn't honor the DoNotRetry
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9da064c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9da064c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9da064c
Branch: refs/heads/hbase-11339
Commit: e9da064ccd8dd3c64d99e0863e36cdc1236779f1
Parents: 679e0e8
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Mon Apr 13 23:21:50 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Mon Apr 13 23:21:50 2015 +0100
----------------------------------------------------------------------
.../hbase/snapshot/SnapshotExistsException.java | 3 +
.../snapshot/TestSnapshotClientRetries.java | 125 +++++++++++++++++++
2 files changed, 128 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9da064c/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java
index 2c609d9..172c89e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java
@@ -28,6 +28,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class SnapshotExistsException extends HBaseSnapshotException {
+ public SnapshotExistsException(String msg) {
+ super(msg);
+ }
/**
* Failure due to the snapshot already existing
http://git-wip-us.apache.org/repos/asf/hbase/blob/e9da064c/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java
new file mode 100644
index 0000000..5168b85
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java
@@ -0,0 +1,125 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotExistsException;
+import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.TestTableName;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({ MediumTests.class })
+public class TestSnapshotClientRetries {
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final Log LOG = LogFactory.getLog(TestSnapshotClientRetries.class);
+
+ @Rule public TestTableName TEST_TABLE = new TestTableName();
+
+ @Before
+ public void setUp() throws Exception {
+ TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
+ MasterSyncObserver.class.getName());
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test(timeout = 60000, expected=SnapshotExistsException.class)
+ public void testSnapshotAlreadyExist() throws Exception {
+ final String snapshotName = "testSnapshotAlreadyExist";
+ TEST_UTIL.createTable(TEST_TABLE.getTableName(), "f");
+ TEST_UTIL.getHBaseAdmin().snapshot(snapshotName, TEST_TABLE.getTableName());
+ snapshotAndAssertOneRetry(snapshotName, TEST_TABLE.getTableName());
+ }
+
+ @Test(timeout = 60000, expected=SnapshotDoesNotExistException.class)
+ public void testCloneNonExistentSnapshot() throws Exception {
+ final String snapshotName = "testCloneNonExistentSnapshot";
+ cloneAndAssertOneRetry(snapshotName, TEST_TABLE.getTableName());
+ }
+
+ public static class MasterSyncObserver extends BaseMasterObserver {
+ volatile AtomicInteger snapshotCount = null;
+ volatile AtomicInteger cloneCount = null;
+
+ @Override
+ public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ if (snapshotCount != null) {
+ snapshotCount.incrementAndGet();
+ }
+ }
+
+ @Override
+ public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+ throws IOException {
+ if (cloneCount != null) {
+ cloneCount.incrementAndGet();
+ }
+ }
+ }
+
+ public void snapshotAndAssertOneRetry(final String snapshotName, final TableName tableName)
+ throws Exception {
+ MasterSyncObserver observer = getMasterSyncObserver();
+ observer.snapshotCount = new AtomicInteger(0);
+ TEST_UTIL.getHBaseAdmin().snapshot(snapshotName, tableName);
+ assertEquals(1, observer.snapshotCount.get());
+ }
+
+ public void cloneAndAssertOneRetry(final String snapshotName, final TableName tableName)
+ throws Exception {
+ MasterSyncObserver observer = getMasterSyncObserver();
+ observer.cloneCount = new AtomicInteger(0);
+ TEST_UTIL.getHBaseAdmin().cloneSnapshot(snapshotName, tableName);
+ assertEquals(1, observer.cloneCount.get());
+ }
+
+ private MasterSyncObserver getMasterSyncObserver() {
+ return (MasterSyncObserver)TEST_UTIL.getHBaseCluster().getMaster()
+ .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName());
+ }
+}
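The net effect on callers, sketched below. The new SnapshotExistsException(String) constructor presumably lets the client-side remote-exception machinery reinstantiate the exception with its DoNotRetry semantics intact, so a duplicate snapshot now fails on the first attempt and can be handled directly (names in this sketch are hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.snapshot.SnapshotExistsException;

public class SnapshotIfAbsent {
  // Take a snapshot unless one with the same name already exists.
  public static void snapshotIfAbsent(Admin admin, String name, TableName table)
      throws Exception {
    try {
      admin.snapshot(name, table);
    } catch (SnapshotExistsException e) {
      // Surfaces on the first attempt now that DoNotRetry is honored;
      // previously the client kept retrying until its operation timeout.
    }
  }
}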
[04/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core framework
Posted by jm...@apache.org.
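Before the generated code below, a minimal sketch of exercising it: building a Procedure message and round-tripping it through bytes, as a ProcedureStore would when persisting state. The five setters cover exactly the required fields visible in the generated isInitialized() checks further down; the procedure class name is a hypothetical placeholder:

import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure;
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;

public class ProcedureMessageExample {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();
    // class_name, proc_id, start_time, state and last_update are the required
    // fields, so build() succeeds once all five are set.
    Procedure proc = Procedure.newBuilder()
        .setClassName("org.example.DummyProcedure") // hypothetical class name
        .setProcId(1L)
        .setStartTime(now)
        .setState(ProcedureState.RUNNABLE)
        .setLastUpdate(now)
        .build();

    // Round-trip through the wire format.
    byte[] bytes = proc.toByteArray();
    Procedure copy = Procedure.parseFrom(bytes);
    System.out.println(copy.getState()); // RUNNABLE
  }
}

toByteArray() and parseFrom(byte[]) are the standard generated round-trip entry points; parseFrom is visible in the generated code below.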
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java
new file mode 100644
index 0000000..3c7dcdb
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java
@@ -0,0 +1,7219 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Procedure.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class ProcedureProtos {
+ private ProcedureProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ /**
+ * Protobuf enum {@code ProcedureState}
+ */
+ public enum ProcedureState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>INITIALIZING = 1;</code>
+ *
+ * <pre>
+ * Procedure in construction, not yet added to the executor
+ * </pre>
+ */
+ INITIALIZING(0, 1),
+ /**
+ * <code>RUNNABLE = 2;</code>
+ *
+ * <pre>
+ * Procedure added to the executor, and ready to be executed
+ * </pre>
+ */
+ RUNNABLE(1, 2),
+ /**
+ * <code>WAITING = 3;</code>
+ *
+ * <pre>
+ * The procedure is waiting on children to be completed
+ * </pre>
+ */
+ WAITING(2, 3),
+ /**
+ * <code>WAITING_TIMEOUT = 4;</code>
+ *
+ * <pre>
+ * The procedure is waiting on a timeout or an external event
+ * </pre>
+ */
+ WAITING_TIMEOUT(3, 4),
+ /**
+ * <code>ROLLEDBACK = 5;</code>
+ *
+ * <pre>
+ * The procedure failed and was rolled back
+ * </pre>
+ */
+ ROLLEDBACK(4, 5),
+ /**
+ * <code>FINISHED = 6;</code>
+ *
+ * <pre>
+ * The procedure execution is completed; it may need a rollback if it failed.
+ * </pre>
+ */
+ FINISHED(5, 6),
+ ;
+
+ /**
+ * <code>INITIALIZING = 1;</code>
+ *
+ * <pre>
+ * Procedure in construction, not yet added to the executor
+ * </pre>
+ */
+ public static final int INITIALIZING_VALUE = 1;
+ /**
+ * <code>RUNNABLE = 2;</code>
+ *
+ * <pre>
+ * Procedure added to the executor, and ready to be executed
+ * </pre>
+ */
+ public static final int RUNNABLE_VALUE = 2;
+ /**
+ * <code>WAITING = 3;</code>
+ *
+ * <pre>
+ * The procedure is waiting on children to be completed
+ * </pre>
+ */
+ public static final int WAITING_VALUE = 3;
+ /**
+ * <code>WAITING_TIMEOUT = 4;</code>
+ *
+ * <pre>
+ * The procedure is waiting on a timeout or an external event
+ * </pre>
+ */
+ public static final int WAITING_TIMEOUT_VALUE = 4;
+ /**
+ * <code>ROLLEDBACK = 5;</code>
+ *
+ * <pre>
+ * The procedure failed and was rolled back
+ * </pre>
+ */
+ public static final int ROLLEDBACK_VALUE = 5;
+ /**
+ * <code>FINISHED = 6;</code>
+ *
+ * <pre>
+ * The procedure execution is completed; it may need a rollback if it failed.
+ * </pre>
+ */
+ public static final int FINISHED_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static ProcedureState valueOf(int value) {
+ switch (value) {
+ case 1: return INITIALIZING;
+ case 2: return RUNNABLE;
+ case 3: return WAITING;
+ case 4: return WAITING_TIMEOUT;
+ case 5: return ROLLEDBACK;
+ case 6: return FINISHED;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ProcedureState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ProcedureState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ProcedureState>() {
+ public ProcedureState findValueByNumber(int number) {
+ return ProcedureState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final ProcedureState[] VALUES = values();
+
+ public static ProcedureState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private ProcedureState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:ProcedureState)
+ }
+
+ public interface ProcedureOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string class_name = 1;
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ boolean hasClassName();
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ java.lang.String getClassName();
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ com.google.protobuf.ByteString
+ getClassNameBytes();
+
+ // optional uint64 parent_id = 2;
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ boolean hasParentId();
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ long getParentId();
+
+ // required uint64 proc_id = 3;
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ boolean hasProcId();
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ long getProcId();
+
+ // required uint64 start_time = 4;
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ boolean hasStartTime();
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ long getStartTime();
+
+ // optional string owner = 5;
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ boolean hasOwner();
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ java.lang.String getOwner();
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ com.google.protobuf.ByteString
+ getOwnerBytes();
+
+ // required .ProcedureState state = 6;
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ boolean hasState();
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState();
+
+ // repeated uint32 stack_id = 7;
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ java.util.List<java.lang.Integer> getStackIdList();
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ int getStackIdCount();
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ int getStackId(int index);
+
+ // required uint64 last_update = 8;
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ boolean hasLastUpdate();
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ long getLastUpdate();
+
+ // optional uint32 timeout = 9;
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ boolean hasTimeout();
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ int getTimeout();
+
+ // optional .ForeignExceptionMessage exception = 10;
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ boolean hasException();
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException();
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder();
+
+ // optional bytes result = 11;
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ boolean hasResult();
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ com.google.protobuf.ByteString getResult();
+
+ // optional bytes state_data = 12;
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ boolean hasStateData();
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ com.google.protobuf.ByteString getStateData();
+ }
+ /**
+ * Protobuf type {@code Procedure}
+ *
+ * <pre>
+ **
+ * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
+ * </pre>
+ */
+ public static final class Procedure extends
+ com.google.protobuf.GeneratedMessage
+ implements ProcedureOrBuilder {
+ // Use Procedure.newBuilder() to construct.
+ private Procedure(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Procedure(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Procedure defaultInstance;
+ public static Procedure getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Procedure getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Procedure(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ className_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ parentId_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ procId_ = input.readUInt64();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ startTime_ = input.readUInt64();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ owner_ = input.readBytes();
+ break;
+ }
+ case 48: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState value = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(6, rawValue);
+ } else {
+ bitField0_ |= 0x00000020;
+ state_ = value;
+ }
+ break;
+ }
+ case 56: {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ stackId_ = new java.util.ArrayList<java.lang.Integer>();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ stackId_.add(input.readUInt32());
+ break;
+ }
+ case 58: {
+ int length = input.readRawVarint32();
+ int limit = input.pushLimit(length);
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) {
+ stackId_ = new java.util.ArrayList<java.lang.Integer>();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ while (input.getBytesUntilLimit() > 0) {
+ stackId_.add(input.readUInt32());
+ }
+ input.popLimit(limit);
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000040;
+ lastUpdate_ = input.readUInt64();
+ break;
+ }
+ case 72: {
+ bitField0_ |= 0x00000080;
+ timeout_ = input.readUInt32();
+ break;
+ }
+ case 82: {
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ subBuilder = exception_.toBuilder();
+ }
+ exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(exception_);
+ exception_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000100;
+ break;
+ }
+ case 90: {
+ bitField0_ |= 0x00000200;
+ result_ = input.readBytes();
+ break;
+ }
+ case 98: {
+ bitField0_ |= 0x00000400;
+ stateData_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ stackId_ = java.util.Collections.unmodifiableList(stackId_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Procedure> PARSER =
+ new com.google.protobuf.AbstractParser<Procedure>() {
+ public Procedure parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Procedure(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Procedure> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string class_name = 1;
+ public static final int CLASS_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object className_;
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public boolean hasClassName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public java.lang.String getClassName() {
+ java.lang.Object ref = className_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ className_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getClassNameBytes() {
+ java.lang.Object ref = className_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ className_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional uint64 parent_id = 2;
+ public static final int PARENT_ID_FIELD_NUMBER = 2;
+ private long parentId_;
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ public boolean hasParentId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ public long getParentId() {
+ return parentId_;
+ }
+
+ // required uint64 proc_id = 3;
+ public static final int PROC_ID_FIELD_NUMBER = 3;
+ private long procId_;
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+
+ // required uint64 start_time = 4;
+ public static final int START_TIME_FIELD_NUMBER = 4;
+ private long startTime_;
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ public boolean hasStartTime() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ public long getStartTime() {
+ return startTime_;
+ }
+
+ // optional string owner = 5;
+ public static final int OWNER_FIELD_NUMBER = 5;
+ private java.lang.Object owner_;
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public boolean hasOwner() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public java.lang.String getOwner() {
+ java.lang.Object ref = owner_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ owner_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getOwnerBytes() {
+ java.lang.Object ref = owner_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ owner_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required .ProcedureState state = 6;
+ public static final int STATE_FIELD_NUMBER = 6;
+ private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState state_;
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState() {
+ return state_;
+ }
+
+ // repeated uint32 stack_id = 7;
+ public static final int STACK_ID_FIELD_NUMBER = 7;
+ private java.util.List<java.lang.Integer> stackId_;
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public java.util.List<java.lang.Integer>
+ getStackIdList() {
+ return stackId_;
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public int getStackIdCount() {
+ return stackId_.size();
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public int getStackId(int index) {
+ return stackId_.get(index);
+ }
+
+ // required uint64 last_update = 8;
+ public static final int LAST_UPDATE_FIELD_NUMBER = 8;
+ private long lastUpdate_;
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ public boolean hasLastUpdate() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ public long getLastUpdate() {
+ return lastUpdate_;
+ }
+
+ // optional uint32 timeout = 9;
+ public static final int TIMEOUT_FIELD_NUMBER = 9;
+ private int timeout_;
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ public boolean hasTimeout() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ public int getTimeout() {
+ return timeout_;
+ }
+
+ // optional .ForeignExceptionMessage exception = 10;
+ public static final int EXCEPTION_FIELD_NUMBER = 10;
+ private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_;
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public boolean hasException() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() {
+ return exception_;
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() {
+ return exception_;
+ }
+
+ // optional bytes result = 11;
+ public static final int RESULT_FIELD_NUMBER = 11;
+ private com.google.protobuf.ByteString result_;
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ public com.google.protobuf.ByteString getResult() {
+ return result_;
+ }
+
+ // optional bytes state_data = 12;
+ public static final int STATE_DATA_FIELD_NUMBER = 12;
+ private com.google.protobuf.ByteString stateData_;
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ public boolean hasStateData() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ public com.google.protobuf.ByteString getStateData() {
+ return stateData_;
+ }
+
+ private void initFields() {
+ className_ = "";
+ parentId_ = 0L;
+ procId_ = 0L;
+ startTime_ = 0L;
+ owner_ = "";
+ state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING;
+ stackId_ = java.util.Collections.emptyList();
+ lastUpdate_ = 0L;
+ timeout_ = 0;
+ exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance();
+ result_ = com.google.protobuf.ByteString.EMPTY;
+ stateData_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasClassName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasProcId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasStartTime()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasState()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasLastUpdate()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getClassNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, parentId_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, procId_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt64(4, startTime_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, getOwnerBytes());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeEnum(6, state_.getNumber());
+ }
+ for (int i = 0; i < stackId_.size(); i++) {
+ output.writeUInt32(7, stackId_.get(i));
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeUInt64(8, lastUpdate_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeUInt32(9, timeout_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeMessage(10, exception_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ output.writeBytes(11, result_);
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ output.writeBytes(12, stateData_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getClassNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, parentId_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, procId_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(4, startTime_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getOwnerBytes());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(6, state_.getNumber());
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < stackId_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeUInt32SizeNoTag(stackId_.get(i));
+ }
+ size += dataSize;
+ size += 1 * getStackIdList().size();
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(8, lastUpdate_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(9, timeout_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(10, exception_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(11, result_);
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(12, stateData_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) obj;
+
+ boolean result = true;
+ result = result && (hasClassName() == other.hasClassName());
+ if (hasClassName()) {
+ result = result && getClassName()
+ .equals(other.getClassName());
+ }
+ result = result && (hasParentId() == other.hasParentId());
+ if (hasParentId()) {
+ result = result && (getParentId()
+ == other.getParentId());
+ }
+ result = result && (hasProcId() == other.hasProcId());
+ if (hasProcId()) {
+ result = result && (getProcId()
+ == other.getProcId());
+ }
+ result = result && (hasStartTime() == other.hasStartTime());
+ if (hasStartTime()) {
+ result = result && (getStartTime()
+ == other.getStartTime());
+ }
+ result = result && (hasOwner() == other.hasOwner());
+ if (hasOwner()) {
+ result = result && getOwner()
+ .equals(other.getOwner());
+ }
+ result = result && (hasState() == other.hasState());
+ if (hasState()) {
+ result = result &&
+ (getState() == other.getState());
+ }
+ result = result && getStackIdList()
+ .equals(other.getStackIdList());
+ result = result && (hasLastUpdate() == other.hasLastUpdate());
+ if (hasLastUpdate()) {
+ result = result && (getLastUpdate()
+ == other.getLastUpdate());
+ }
+ result = result && (hasTimeout() == other.hasTimeout());
+ if (hasTimeout()) {
+ result = result && (getTimeout()
+ == other.getTimeout());
+ }
+ result = result && (hasException() == other.hasException());
+ if (hasException()) {
+ result = result && getException()
+ .equals(other.getException());
+ }
+ result = result && (hasResult() == other.hasResult());
+ if (hasResult()) {
+ result = result && getResult()
+ .equals(other.getResult());
+ }
+ result = result && (hasStateData() == other.hasStateData());
+ if (hasStateData()) {
+ result = result && getStateData()
+ .equals(other.getStateData());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasClassName()) {
+ hash = (37 * hash) + CLASS_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getClassName().hashCode();
+ }
+ if (hasParentId()) {
+ hash = (37 * hash) + PARENT_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getParentId());
+ }
+ if (hasProcId()) {
+ hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getProcId());
+ }
+ if (hasStartTime()) {
+ hash = (37 * hash) + START_TIME_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getStartTime());
+ }
+ if (hasOwner()) {
+ hash = (37 * hash) + OWNER_FIELD_NUMBER;
+ hash = (53 * hash) + getOwner().hashCode();
+ }
+ if (hasState()) {
+ hash = (37 * hash) + STATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getState());
+ }
+ if (getStackIdCount() > 0) {
+ hash = (37 * hash) + STACK_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getStackIdList().hashCode();
+ }
+ if (hasLastUpdate()) {
+ hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLastUpdate());
+ }
+ if (hasTimeout()) {
+ hash = (37 * hash) + TIMEOUT_FIELD_NUMBER;
+ hash = (53 * hash) + getTimeout();
+ }
+ if (hasException()) {
+ hash = (37 * hash) + EXCEPTION_FIELD_NUMBER;
+ hash = (53 * hash) + getException().hashCode();
+ }
+ if (hasResult()) {
+ hash = (37 * hash) + RESULT_FIELD_NUMBER;
+ hash = (53 * hash) + getResult().hashCode();
+ }
+ if (hasStateData()) {
+ hash = (37 * hash) + STATE_DATA_FIELD_NUMBER;
+ hash = (53 * hash) + getStateData().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code Procedure}
+ *
+ * <pre>
+ **
+ * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
+ * </pre>
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getExceptionFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ className_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ parentId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ procId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ startTime_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ owner_ = "";
+ bitField0_ = (bitField0_ & ~0x00000010);
+ state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ stackId_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ lastUpdate_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ timeout_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ if (exceptionBuilder_ == null) {
+ exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance();
+ } else {
+ exceptionBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000200);
+ result_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000400);
+ stateData_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000800);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure build() {
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.className_ = className_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.parentId_ = parentId_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.procId_ = procId_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.startTime_ = startTime_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.owner_ = owner_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.state_ = state_;
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ stackId_ = java.util.Collections.unmodifiableList(stackId_);
+ bitField0_ = (bitField0_ & ~0x00000040);
+ }
+ result.stackId_ = stackId_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.lastUpdate_ = lastUpdate_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.timeout_ = timeout_;
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ if (exceptionBuilder_ == null) {
+ result.exception_ = exception_;
+ } else {
+ result.exception_ = exceptionBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000200;
+ }
+ result.result_ = result_;
+ if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+ to_bitField0_ |= 0x00000400;
+ }
+ result.stateData_ = stateData_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) return this;
+ if (other.hasClassName()) {
+ bitField0_ |= 0x00000001;
+ className_ = other.className_;
+ onChanged();
+ }
+ if (other.hasParentId()) {
+ setParentId(other.getParentId());
+ }
+ if (other.hasProcId()) {
+ setProcId(other.getProcId());
+ }
+ if (other.hasStartTime()) {
+ setStartTime(other.getStartTime());
+ }
+ if (other.hasOwner()) {
+ bitField0_ |= 0x00000010;
+ owner_ = other.owner_;
+ onChanged();
+ }
+ if (other.hasState()) {
+ setState(other.getState());
+ }
+ if (!other.stackId_.isEmpty()) {
+ if (stackId_.isEmpty()) {
+ stackId_ = other.stackId_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ ensureStackIdIsMutable();
+ stackId_.addAll(other.stackId_);
+ }
+ onChanged();
+ }
+ if (other.hasLastUpdate()) {
+ setLastUpdate(other.getLastUpdate());
+ }
+ if (other.hasTimeout()) {
+ setTimeout(other.getTimeout());
+ }
+ if (other.hasException()) {
+ mergeException(other.getException());
+ }
+ if (other.hasResult()) {
+ setResult(other.getResult());
+ }
+ if (other.hasStateData()) {
+ setStateData(other.getStateData());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasClassName()) {
+
+ return false;
+ }
+ if (!hasProcId()) {
+
+ return false;
+ }
+ if (!hasStartTime()) {
+
+ return false;
+ }
+ if (!hasState()) {
+
+ return false;
+ }
+ if (!hasLastUpdate()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string class_name = 1;
+ private java.lang.Object className_ = "";
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public boolean hasClassName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public java.lang.String getClassName() {
+ java.lang.Object ref = className_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ className_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getClassNameBytes() {
+ java.lang.Object ref = className_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ className_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public Builder setClassName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ className_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public Builder clearClassName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ className_ = getDefaultInstance().getClassName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string class_name = 1;</code>
+ *
+ * <pre>
+ * internal "static" state
+ * </pre>
+ */
+ public Builder setClassNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ className_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 parent_id = 2;
+ private long parentId_ ;
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ public boolean hasParentId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ public long getParentId() {
+ return parentId_;
+ }
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ public Builder setParentId(long value) {
+ bitField0_ |= 0x00000002;
+ parentId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 parent_id = 2;</code>
+ *
+ * <pre>
+ * parent if not a root-procedure, otherwise not set
+ * </pre>
+ */
+ public Builder clearParentId() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ parentId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 proc_id = 3;
+ private long procId_ ;
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ public boolean hasProcId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ public long getProcId() {
+ return procId_;
+ }
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ public Builder setProcId(long value) {
+ bitField0_ |= 0x00000004;
+ procId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required uint64 proc_id = 3;</code>
+ */
+ public Builder clearProcId() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ procId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 start_time = 4;
+ private long startTime_ ;
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ public boolean hasStartTime() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ public long getStartTime() {
+ return startTime_;
+ }
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ public Builder setStartTime(long value) {
+ bitField0_ |= 0x00000008;
+ startTime_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required uint64 start_time = 4;</code>
+ */
+ public Builder clearStartTime() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ startTime_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional string owner = 5;
+ private java.lang.Object owner_ = "";
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public boolean hasOwner() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public java.lang.String getOwner() {
+ java.lang.Object ref = owner_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ owner_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public com.google.protobuf.ByteString
+ getOwnerBytes() {
+ java.lang.Object ref = owner_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ owner_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public Builder setOwner(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ owner_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public Builder clearOwner() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ owner_ = getDefaultInstance().getOwner();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string owner = 5;</code>
+ */
+ public Builder setOwnerBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ owner_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required .ProcedureState state = 6;
+ private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING;
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState() {
+ return state_;
+ }
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ state_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required .ProcedureState state = 6;</code>
+ *
+ * <pre>
+ * internal "runtime" state
+ * </pre>
+ */
+ public Builder clearState() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING;
+ onChanged();
+ return this;
+ }
+
+ // repeated uint32 stack_id = 7;
+ private java.util.List<java.lang.Integer> stackId_ = java.util.Collections.emptyList();
+ private void ensureStackIdIsMutable() {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ stackId_ = new java.util.ArrayList<java.lang.Integer>(stackId_);
+ bitField0_ |= 0x00000040;
+ }
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public java.util.List<java.lang.Integer>
+ getStackIdList() {
+ return java.util.Collections.unmodifiableList(stackId_);
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public int getStackIdCount() {
+ return stackId_.size();
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public int getStackId(int index) {
+ return stackId_.get(index);
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public Builder setStackId(
+ int index, int value) {
+ ensureStackIdIsMutable();
+ stackId_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public Builder addStackId(int value) {
+ ensureStackIdIsMutable();
+ stackId_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public Builder addAllStackId(
+ java.lang.Iterable<? extends java.lang.Integer> values) {
+ ensureStackIdIsMutable();
+ super.addAll(values, stackId_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated uint32 stack_id = 7;</code>
+ *
+ * <pre>
+ * stack indices in case the procedure was running
+ * </pre>
+ */
+ public Builder clearStackId() {
+ stackId_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ onChanged();
+ return this;
+ }
+
+ // required uint64 last_update = 8;
+ private long lastUpdate_ ;
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ public boolean hasLastUpdate() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ public long getLastUpdate() {
+ return lastUpdate_;
+ }
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ public Builder setLastUpdate(long value) {
+ bitField0_ |= 0x00000080;
+ lastUpdate_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required uint64 last_update = 8;</code>
+ */
+ public Builder clearLastUpdate() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ lastUpdate_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional uint32 timeout = 9;
+ private int timeout_ ;
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ public boolean hasTimeout() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ public int getTimeout() {
+ return timeout_;
+ }
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ public Builder setTimeout(int value) {
+ bitField0_ |= 0x00000100;
+ timeout_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint32 timeout = 9;</code>
+ */
+ public Builder clearTimeout() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ timeout_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional .ForeignExceptionMessage exception = 10;
+ private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_;
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public boolean hasException() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() {
+ if (exceptionBuilder_ == null) {
+ return exception_;
+ } else {
+ return exceptionBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) {
+ if (exceptionBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ exception_ = value;
+ onChanged();
+ } else {
+ exceptionBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000200;
+ return this;
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public Builder setException(
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) {
+ if (exceptionBuilder_ == null) {
+ exception_ = builderForValue.build();
+ onChanged();
+ } else {
+ exceptionBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000200;
+ return this;
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) {
+ if (exceptionBuilder_ == null) {
+ if (((bitField0_ & 0x00000200) == 0x00000200) &&
+ exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) {
+ exception_ =
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial();
+ } else {
+ exception_ = value;
+ }
+ onChanged();
+ } else {
+ exceptionBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000200;
+ return this;
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public Builder clearException() {
+ if (exceptionBuilder_ == null) {
+ exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance();
+ onChanged();
+ } else {
+ exceptionBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000200);
+ return this;
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() {
+ bitField0_ |= 0x00000200;
+ onChanged();
+ return getExceptionFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() {
+ if (exceptionBuilder_ != null) {
+ return exceptionBuilder_.getMessageOrBuilder();
+ } else {
+ return exception_;
+ }
+ }
+ /**
+ * <code>optional .ForeignExceptionMessage exception = 10;</code>
+ *
+ * <pre>
+ * user state/results
+ * </pre>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>
+ getExceptionFieldBuilder() {
+ if (exceptionBuilder_ == null) {
+ exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>(
+ exception_,
+ getParentForChildren(),
+ isClean());
+ exception_ = null;
+ }
+ return exceptionBuilder_;
+ }
+
+ // optional bytes result = 11;
+ private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ public com.google.protobuf.ByteString getResult() {
+ return result_;
+ }
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ public Builder setResult(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ result_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes result = 11;</code>
+ *
+ * <pre>
+ * opaque (user) result structure
+ * </pre>
+ */
+ public Builder clearResult() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ result_ = getDefaultInstance().getResult();
+ onChanged();
+ return this;
+ }
+
+ // optional bytes state_data = 12;
+ private com.google.protobuf.ByteString stateData_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ public boolean hasStateData() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ public com.google.protobuf.ByteString getStateData() {
+ return stateData_;
+ }
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ public Builder setStateData(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000800;
+ stateData_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes state_data = 12;</code>
+ *
+ * <pre>
+ * opaque (user) procedure internal-state
+ * </pre>
+ */
+ public Builder clearStateData() {
+ bitField0_ = (bitField0_ & ~0x00000800);
+ stateData_ = getDefaultInstance().getStateData();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:Procedure)
+ }
+
+ static {
+ defaultInstance = new Procedure(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:Procedure)
+ }
+
+ public interface SequentialProcedureDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bool executed = 1;
+ /**
+ * <code>required bool executed = 1;</code>
+ */
+ boolean hasExecuted();
+ /**
+ * <code>required bool executed = 1;</code>
+ */
+ boolean getExecuted();
+ }
+ /**
+ * Protobuf type {@code SequentialProcedureData}
+ *
+ * <pre>
+ **
+ * SequentialProcedure data
+ * </pre>
+ */
+ public static final class SequentialProcedureData extends
+ com.google.protobuf.GeneratedMessage
+ implements SequentialProcedureDataOrBuilder {
+ // Use SequentialProcedureData.newBuilder() to construct.
+ private SequentialProcedureData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SequentialProcedureData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SequentialProcedureData defaultInstance;
+ public static SequentialProcedureData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SequentialProcedureData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SequentialProcedureData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ executed_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<SequentialProcedureData> PARSER =
+ new com.google.protobuf.AbstractParser<SequentialProcedureData>() {
+ public SequentialProcedureData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SequentialProcedureData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<SequentialProcedureData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bool executed = 1;
+ public static final int EXECUTED_FIELD_NUMBER = 1;
+ private boolean executed_;
+ /**
+ * <code>required bool executed = 1;</code>
+ */
+ public boolean hasExecuted() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool executed = 1;</code>
+ */
+ public boolean getExecuted() {
+ return executed_;
+ }
+
+ private void initFields() {
+ executed_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasExecuted()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, executed_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, executed_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) obj;
+
+ boolean result = true;
+ result = result && (hasExecuted() == other.hasExecuted());
+ if (hasExecuted()) {
+ result = result && (getExecuted()
+ == other.getExecuted());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasExecuted()) {
+ hash = (37 * hash) + EXECUTED_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getExecuted());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SequentialProcedureData}
+ *
+ * <pre>
+ **
+ * SequentialProcedure data
+ * </pre>
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ executed_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData build() {
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.executed_ = executed_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.getDefaultInstance()) return this;
+ if (other.hasExecuted()) {
+ setExecuted(other.getExecuted());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasExecuted()) {
+
+ return false;
+ }
+ return true;
<TRUNCATED>
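For orientation, a minimal sketch (not part of the commit; it assumes the generated
ProcedureProtos class above is on the classpath, and the owner value is hypothetical)
of how the builder methods shown above compose. Fields 1-4 of the Procedure message
fall outside this excerpt, so buildPartial() is used to bypass their required-field
checks:

    import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;

    public class ProcedureBuilderSketch {
      public static ProcedureProtos.Procedure sketch() {
        return ProcedureProtos.Procedure.newBuilder()
            .setOwner("alice")                                       // optional string owner = 5
            .setState(ProcedureProtos.ProcedureState.INITIALIZING)   // required .ProcedureState state = 6
            .addStackId(0)                                           // repeated uint32 stack_id = 7
            .setLastUpdate(System.currentTimeMillis())               // required uint64 last_update = 8
            .setTimeout(30000)                                       // optional uint32 timeout = 9
            .buildPartial();  // build() would also demand fields 1-4, not shown here
      }
    }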
[05/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core
framework
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
new file mode 100644
index 0000000..0669549
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestProcedureStoreTracker {
+ private static final Log LOG = LogFactory.getLog(TestProcedureStoreTracker.class);
+
+ static class TestProcedure extends Procedure<Void> {
+ public TestProcedure(long procId) {
+ setProcId(procId);
+ }
+
+ @Override
+ protected Procedure[] execute(Void env) { return null; }
+
+ @Override
+ protected void rollback(Void env) { /* no-op */ }
+
+ @Override
+ protected boolean abort(Void env) { return false; }
+
+ @Override
+ protected void serializeStateData(final OutputStream stream) { /* no-op */ }
+
+ @Override
+ protected void deserializeStateData(final InputStream stream) { /* no-op */ }
+ }
+
+ @Test
+ public void testSeqInsertAndDelete() {
+ ProcedureStoreTracker tracker = new ProcedureStoreTracker();
+ assertTrue(tracker.isEmpty());
+
+ final int MIN_PROC = 1;
+ final int MAX_PROC = 1 << 10;
+
+ // sequential insert
+ for (int i = MIN_PROC; i < MAX_PROC; ++i) {
+ tracker.insert(i);
+
+ // All the procs that we inserted should not be deleted
+ for (int j = MIN_PROC; j <= i; ++j) {
+ assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j));
+ }
+ // All the procs that are not yet inserted should be reported as deleted
+ for (int j = i + 1; j < MAX_PROC; ++j) {
+ assertTrue(tracker.isDeleted(j) != ProcedureStoreTracker.DeleteState.NO);
+ }
+ }
+
+ // sequential delete
+ for (int i = MIN_PROC; i < MAX_PROC; ++i) {
+ tracker.delete(i);
+
+ // All the procs that we deleted should be marked deleted
+ for (int j = MIN_PROC; j <= i; ++j) {
+ assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(j));
+ }
+ // All the procs that are not yet deleted should be reported as not deleted
+ for (int j = i + 1; j < MAX_PROC; ++j) {
+ assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j));
+ }
+ }
+ assertTrue(tracker.isEmpty());
+ }
+
+ @Test
+ public void testPartialTracker() {
+ ProcedureStoreTracker tracker = new ProcedureStoreTracker();
+ tracker.setPartialFlag(true);
+
+ // nothing in the tracker, the state is unknown
+ assertTrue(tracker.isEmpty());
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(1));
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(579));
+
+ // Mark 1 as deleted, now that is a known state
+ tracker.setDeleted(1, true);
+ tracker.dump();
+ assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(1));
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(2));
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(579));
+
+ // Mark 579 as non-deleted, now that is a known state
+ tracker.setDeleted(579, false);
+ assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(1));
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(2));
+ assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(579));
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(577));
+ assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(580));
+ }
+
+ @Test
+ public void testBasicCRUD() {
+ ProcedureStoreTracker tracker = new ProcedureStoreTracker();
+ assertTrue(tracker.isEmpty());
+
+ Procedure[] procs = new TestProcedure[] {
+ new TestProcedure(1), new TestProcedure(2), new TestProcedure(3),
+ new TestProcedure(4), new TestProcedure(5), new TestProcedure(6),
+ };
+
+ tracker.insert(procs[0], null);
+ tracker.insert(procs[1], new Procedure[] { procs[2], procs[3], procs[4] });
+ assertFalse(tracker.isEmpty());
+ assertTrue(tracker.isUpdated());
+
+ tracker.resetUpdates();
+ assertFalse(tracker.isUpdated());
+
+ for (int i = 0; i < 4; ++i) {
+ tracker.update(procs[i]);
+ assertFalse(tracker.isEmpty());
+ assertFalse(tracker.isUpdated());
+ }
+
+ tracker.update(procs[4]);
+ assertFalse(tracker.isEmpty());
+ assertTrue(tracker.isUpdated());
+
+ tracker.update(procs[5]);
+ assertFalse(tracker.isEmpty());
+ assertTrue(tracker.isUpdated());
+
+ for (int i = 0; i < 5; ++i) {
+ tracker.delete(procs[i].getProcId());
+ assertFalse(tracker.isEmpty());
+ assertTrue(tracker.isUpdated());
+ }
+ tracker.delete(procs[5].getProcId());
+ assertTrue(tracker.isEmpty());
+ }
+}
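To make the tri-state semantics exercised above explicit, a small sketch (relying only
on the ProcedureStoreTracker calls the tests already use; the id 42 is arbitrary):
isDeleted() answers YES or NO for ids whose state the tracker knows, and MAYBE when a
partial tracker cannot tell:

    import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
    import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker.DeleteState;

    public class TrackerSketch {
      public static void main(String[] args) {
        ProcedureStoreTracker tracker = new ProcedureStoreTracker();
        tracker.insert(42);                                   // known alive
        assert tracker.isDeleted(42) == DeleteState.NO;
        tracker.delete(42);                                   // known deleted
        assert tracker.isDeleted(42) == DeleteState.YES;

        ProcedureStoreTracker partial = new ProcedureStoreTracker();
        partial.setPartialFlag(true);                         // state rebuilt from a partial log
        assert partial.isDeleted(42) == DeleteState.MAYBE;    // unseen id: unknown
      }
    }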
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
new file mode 100644
index 0000000..344b28b
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
@@ -0,0 +1,267 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Iterator;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.procedure2.SequentialProcedure;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.IOUtils;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestWALProcedureStore {
+ private static final Log LOG = LogFactory.getLog(TestWALProcedureStore.class);
+
+ private static final int PROCEDURE_STORE_SLOTS = 1;
+ private static final Procedure NULL_PROC = null;
+
+ private WALProcedureStore procStore;
+
+ private HBaseCommonTestingUtility htu;
+ private FileSystem fs;
+ private Path testDir;
+ private Path logDir;
+
+ @Before
+ public void setUp() throws IOException {
+ htu = new HBaseCommonTestingUtility();
+ testDir = htu.getDataTestDir();
+ fs = testDir.getFileSystem(htu.getConfiguration());
+ assertTrue(testDir.depth() > 1);
+
+ logDir = new Path(testDir, "proc-logs");
+ procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
+ procStore.start(PROCEDURE_STORE_SLOTS);
+ procStore.recoverLease();
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ procStore.stop(false);
+ fs.delete(logDir, true);
+ }
+
+ private Iterator<Procedure> storeRestart() throws Exception {
+ procStore.stop(false);
+ procStore.start(PROCEDURE_STORE_SLOTS);
+ procStore.recoverLease();
+ return procStore.load();
+ }
+
+ @Test
+ public void testEmptyLogLoad() throws Exception {
+ Iterator<Procedure> loader = storeRestart();
+ assertEquals(0, countProcedures(loader));
+ }
+
+ @Test
+ public void testLoad() throws Exception {
+ Set<Long> procIds = new HashSet<>();
+
+ // Insert something in the log
+ Procedure proc1 = new TestSequentialProcedure();
+ procIds.add(proc1.getProcId());
+ procStore.insert(proc1, null);
+
+ Procedure proc2 = new TestSequentialProcedure();
+ Procedure[] child2 = new Procedure[2];
+ child2[0] = new TestSequentialProcedure();
+ child2[1] = new TestSequentialProcedure();
+
+ procIds.add(proc2.getProcId());
+ procIds.add(child2[0].getProcId());
+ procIds.add(child2[1].getProcId());
+ procStore.insert(proc2, child2);
+
+ // Verify that everything is there
+ verifyProcIdsOnRestart(procIds);
+
+ // Update and delete something
+ procStore.update(proc1);
+ procStore.update(child2[1]);
+ procStore.delete(child2[1].getProcId());
+ procIds.remove(child2[1].getProcId());
+
+ // Verify that everything is there
+ verifyProcIdsOnRestart(procIds);
+
+ // Remove 4 bytes from the trailers
+ procStore.stop(false);
+ FileStatus[] logs = fs.listStatus(logDir);
+ assertEquals(3, logs.length);
+ for (int i = 0; i < logs.length; ++i) {
+ corruptLog(logs[i], 4);
+ }
+ verifyProcIdsOnRestart(procIds);
+ }
+
+ @Test
+ public void testCorruptedTrailer() throws Exception {
+ // Insert something
+ for (int i = 0; i < 100; ++i) {
+ procStore.insert(new TestSequentialProcedure(), null);
+ }
+
+ // Stop the store
+ procStore.stop(false);
+
+ // Remove 4 bytes from the trailer
+ FileStatus[] logs = fs.listStatus(logDir);
+ assertEquals(1, logs.length);
+ corruptLog(logs[0], 4);
+
+ int count = countProcedures(storeRestart());
+ assertEquals(100, count);
+ }
+
+ @Test
+ public void testCorruptedEntries() throws Exception {
+ // Insert something
+ for (int i = 0; i < 100; ++i) {
+ procStore.insert(new TestSequentialProcedure(), null);
+ }
+
+ // Stop the store
+ procStore.stop(false);
+
+ // Remove some bytes from the log
+ // (enough to cut the trailer and corrupt some entries)
+ FileStatus[] logs = fs.listStatus(logDir);
+ assertEquals(1, logs.length);
+ corruptLog(logs[0], 1823);
+
+ int count = countProcedures(storeRestart());
+ assertTrue(procStore.getCorruptedLogs() != null);
+ assertEquals(1, procStore.getCorruptedLogs().size());
+ assertEquals(85, count);
+ }
+
+ private void corruptLog(final FileStatus logFile, final long dropBytes)
+ throws IOException {
+ assertTrue(logFile.getLen() > dropBytes);
+ LOG.debug("corrupt log " + logFile.getPath() +
+ " size=" + logFile.getLen() + " drop=" + dropBytes);
+ Path tmpPath = new Path(testDir, "corrupted.log");
+ InputStream in = fs.open(logFile.getPath());
+ OutputStream out = fs.create(tmpPath);
+ IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true);
+ fs.rename(tmpPath, logFile.getPath());
+ }
+
+ private void verifyProcIdsOnRestart(final Set<Long> procIds) throws Exception {
+ int count = 0;
+ Iterator<Procedure> loader = storeRestart();
+ while (loader.hasNext()) {
+ Procedure proc = loader.next();
+ LOG.debug("loading procId=" + proc.getProcId());
+ assertTrue("procId=" + proc.getProcId() + " unexpected", procIds.contains(proc.getProcId()));
+ count++;
+ }
+ assertEquals(procIds.size(), count);
+ }
+
+ private void assertIsEmpty(Iterator<Procedure> iterator) {
+ assertEquals(0, countProcedures(iterator));
+ }
+
+ private int countProcedures(Iterator<Procedure> iterator) {
+ int count = 0;
+ while (iterator.hasNext()) {
+ Procedure proc = iterator.next();
+ LOG.trace("loading procId=" + proc.getProcId());
+ count++;
+ }
+ return count;
+ }
+
+ private void assertEmptyLogDir() {
+ try {
+ FileStatus[] status = fs.listStatus(logDir);
+ assertTrue("expected empty state-log dir", status == null || status.length == 0);
+ } catch (FileNotFoundException e) {
+ fail("expected the state-log dir to be present: " + logDir);
+ } catch (IOException e) {
+ fail("got en exception on state-log dir list: " + e.getMessage());
+ }
+ }
+
+ public static class TestSequentialProcedure extends SequentialProcedure<Void> {
+ private static long seqid = 0;
+
+ public TestSequentialProcedure() {
+ setProcId(++seqid);
+ }
+
+ @Override
+ protected Procedure[] execute(Void env) { return null; }
+
+ @Override
+ protected void rollback(Void env) { }
+
+ @Override
+ protected boolean abort(Void env) { return false; }
+
+ @Override
+ protected void serializeStateData(final OutputStream stream) throws IOException {
+ long procId = getProcId();
+ if (procId % 2 == 0) {
+ stream.write(Bytes.toBytes(procId));
+ }
+ }
+
+ @Override
+ protected void deserializeStateData(InputStream stream) throws IOException {
+ long procId = getProcId();
+ if (procId % 2 == 0) {
+ byte[] bProcId = new byte[8];
+ assertEquals(8, stream.read(bProcId));
+ assertEquals(procId, Bytes.toLong(bProcId));
+ } else {
+ assertEquals(0, stream.available());
+ }
+ }
+ }
+}
\ No newline at end of file
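The corruptLog() helper above simulates a damaged WAL by rewriting the file minus its
last N bytes; a standalone sketch of the same trick with plain NIO (hypothetical file
name, no HDFS dependency), for readers who want to reproduce the corruption locally:

    import java.io.IOException;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    public class TruncateTailSketch {
      // Drop the trailing dropBytes bytes so the trailer (and possibly
      // some entries) are lost, as corruptLog() does against HDFS.
      static void truncateTail(Path log, long dropBytes) throws IOException {
        try (FileChannel ch = FileChannel.open(log, StandardOpenOption.WRITE)) {
          ch.truncate(ch.size() - dropBytes);
        }
      }

      public static void main(String[] args) throws IOException {
        truncateTail(Paths.get("proc-logs/state-00001.log"), 4);  // hypothetical path
      }
    }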
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
new file mode 100644
index 0000000..aff536a
--- /dev/null
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.util;
+
+
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestTimeoutBlockingQueue {
+ private static final Log LOG = LogFactory.getLog(TestTimeoutBlockingQueue.class);
+
+ static class TestObject {
+ private long timeout;
+ private int seqId;
+
+ public TestObject(int seqId, long timeout) {
+ this.timeout = timeout;
+ this.seqId = seqId;
+ }
+
+ public long getTimeout() {
+ return timeout;
+ }
+
+ public String toString() {
+ return String.format("(%03d, %03d)", seqId, timeout);
+ }
+ }
+
+ static class TestObjectTimeoutRetriever implements TimeoutRetriever<TestObject> {
+ @Override
+ public long getTimeout(TestObject obj) {
+ return obj.getTimeout();
+ }
+
+ @Override
+ public TimeUnit getTimeUnit(TestObject obj) {
+ return TimeUnit.MILLISECONDS;
+ }
+ }
+
+ @Test
+ public void testOrder() {
+ TimeoutBlockingQueue<TestObject> queue =
+ new TimeoutBlockingQueue<TestObject>(8, new TestObjectTimeoutRetriever());
+
+ long[] timeouts = new long[] {500, 200, 700, 300, 600, 600, 200, 800, 500};
+
+ for (int i = 0; i < timeouts.length; ++i) {
+ for (int j = 0; j <= i; ++j) {
+ queue.add(new TestObject(j, timeouts[j]));
+ queue.dump();
+ }
+
+ long prev = 0;
+ for (int j = 0; j <= i; ++j) {
+ TestObject obj = queue.poll();
+ assertTrue(obj.getTimeout() >= prev);
+ prev = obj.getTimeout();
+ queue.dump();
+ }
+ }
+ }
+
+ @Test
+ public void testTimeoutBlockingQueue() {
+ TimeoutBlockingQueue<TestObject> queue;
+
+ int[][] testArray = new int[][] {
+ {200, 400, 600}, // append
+ {200, 400, 100}, // prepend
+ {200, 400, 300}, // insert
+ };
+
+ for (int i = 0; i < testArray.length; ++i) {
+ int[] sortedArray = Arrays.copyOf(testArray[i], testArray[i].length);
+ Arrays.sort(sortedArray);
+
+ // test with head == 0
+ queue = new TimeoutBlockingQueue<TestObject>(2, new TestObjectTimeoutRetriever());
+ for (int j = 0; j < testArray[i].length; ++j) {
+ queue.add(new TestObject(j, testArray[i][j]));
+ queue.dump();
+ }
+
+ for (int j = 0; !queue.isEmpty(); ++j) {
+ assertEquals(sortedArray[j], queue.poll().getTimeout());
+ }
+
+ queue = new TimeoutBlockingQueue<TestObject>(2, new TestObjectTimeoutRetriever());
+ queue.add(new TestObject(0, 50));
+ assertEquals(50, queue.poll().getTimeout());
+
+ // test with head > 0
+ for (int j = 0; j < testArray[i].length; ++j) {
+ queue.add(new TestObject(j, testArray[i][j]));
+ queue.dump();
+ }
+
+ for (int j = 0; !queue.isEmpty(); ++j) {
+ assertEquals(sortedArray[j], queue.poll().getTimeout());
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-protocol/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 7787c52..0d33332 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -176,6 +176,7 @@
<include>MapReduce.proto</include>
<include>Master.proto</include>
<include>MultiRowMutation.proto</include>
+ <include>Procedure.proto</include>
<include>Quota.proto</include>
<include>RegionServerStatus.proto</include>
<include>RowProcessor.proto</include>
[45/50] [abbrv] hbase git commit: Merge branch 'apache/master'
(4/16/15) into hbase-11339
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java
index 8a017a2,0000000..31778ae
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java
@@@ -1,310 -1,0 +1,308 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.mob.mapreduce;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestMobSweeper {
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private String tableName;
+ private final static String row = "row_";
+ private final static String family = "family";
+ private final static String column = "column";
- private static HTable table;
++ private static Table table;
++ private static BufferedMutator bufMut;
+ private static Admin admin;
+
+ private Random random = new Random();
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 15); // avoid major compactions
+ TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.max", 30); // avoid major compactions
+ TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+
+ TEST_UTIL.startMiniCluster();
+
+ TEST_UTIL.startMiniMapReduceCluster();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniMapReduceCluster();
+ }
+
+ @SuppressWarnings("deprecation")
+ @Before
+ public void setUp() throws Exception {
+ long tid = System.currentTimeMillis();
+ tableName = "testSweeper" + tid;
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+
+ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), tableName);
- table.setAutoFlush(false, false);
-
++ Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
++ TableName tn = TableName.valueOf(tableName);
++ table = c.getTable(tn);
++ bufMut = c.getBufferedMutator(tn);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ admin.disableTable(TableName.valueOf(tableName));
+ admin.deleteTable(TableName.valueOf(tableName));
+ admin.close();
+ }
+
+ private Path getMobFamilyPath(Configuration conf, String tableNameStr,
+ String familyName) {
+ Path p = new Path(MobUtils.getMobRegionPath(conf, TableName.valueOf(tableNameStr)),
+ familyName);
+ return p;
+ }
+
+ private String mergeString(Set<String> set) {
+ StringBuilder sb = new StringBuilder();
+ for (String s : set)
+ sb.append(s);
+ return sb.toString();
+ }
+
- private void generateMobTable(Admin admin, HTable table, String tableName, int count,
++ private void generateMobTable(Admin admin, BufferedMutator table, String tableName, int count,
+ int flushStep) throws IOException, InterruptedException {
+ if (count <= 0 || flushStep <= 0)
+ return;
+ int index = 0;
+ for (int i = 0; i < count; i++) {
+ byte[] mobVal = new byte[101*1024];
+ random.nextBytes(mobVal);
+
+ Put put = new Put(Bytes.toBytes(row + i));
- put.add(Bytes.toBytes(family), Bytes.toBytes(column), mobVal);
- table.put(put);
++ put.addColumn(Bytes.toBytes(family), Bytes.toBytes(column), mobVal);
++ table.mutate(put);
+ if (index++ % flushStep == 0) {
- table.flushCommits();
++ table.flush();
+ admin.flush(TableName.valueOf(tableName));
+ }
+ }
- table.flushCommits();
++ table.flush();
+ admin.flush(TableName.valueOf(tableName));
+ }
+
+ @Test
+ public void testSweeper() throws Exception {
+ int count = 10;
+ //create table and generate 10 mob files
- generateMobTable(admin, table, tableName, count, 1);
++ generateMobTable(admin, bufMut, tableName, count, 1);
+ //get mob files
+ Path mobFamilyPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
+ FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
- // mobFileSet0 stores the orignal mob files
++ // mobFileSet0 stores the original mob files
+ TreeSet<String> mobFilesSet = new TreeSet<String>();
+ for (FileStatus status : fileStatuses) {
+ mobFilesSet.add(status.getPath().getName());
+ }
+
+ // scan the table, retrieve the references
+ Scan scan = new Scan();
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
+ ResultScanner rs = table.getScanner(scan);
+ TreeSet<String> mobFilesScanned = new TreeSet<String>();
+ for (Result res : rs) {
+ byte[] valueBytes = res.getValue(Bytes.toBytes(family),
+ Bytes.toBytes(column));
+ mobFilesScanned.add(Bytes.toString(valueBytes, Bytes.SIZEOF_INT,
+ valueBytes.length - Bytes.SIZEOF_INT));
+ }
+ //there should be 10 mob files
+ assertEquals(10, mobFilesScanned.size());
+ //check if we store the correct reference of mob files
+ assertEquals(mergeString(mobFilesSet), mergeString(mobFilesScanned));
+
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setLong(SweepJob.MOB_SWEEP_JOB_DELAY, 24 * 60 * 60 * 1000);
+
+ String[] args = new String[2];
+ args[0] = tableName;
+ args[1] = family;
+ assertEquals(0, ToolRunner.run(conf, new Sweeper(), args));
+
+ mobFamilyPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
+ fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ mobFilesSet = new TreeSet<String>();
+ for (FileStatus status : fileStatuses) {
+ mobFilesSet.add(status.getPath().getName());
+ }
+ assertEquals(10, mobFilesSet.size());
+
+ scan = new Scan();
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
+ rs = table.getScanner(scan);
+ TreeSet<String> mobFilesScannedAfterJob = new TreeSet<String>();
+ for (Result res : rs) {
+ byte[] valueBytes = res.getValue(Bytes.toBytes(family), Bytes.toBytes(
+ column));
+ mobFilesScannedAfterJob.add(Bytes.toString(valueBytes, Bytes.SIZEOF_INT,
+ valueBytes.length - Bytes.SIZEOF_INT));
+ }
+ assertEquals(10, mobFilesScannedAfterJob.size());
+
+ fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ mobFilesSet = new TreeSet<String>();
+ for (FileStatus status : fileStatuses) {
+ mobFilesSet.add(status.getPath().getName());
+ }
+ assertEquals(10, mobFilesSet.size());
+ assertEquals(true, mobFilesScannedAfterJob.iterator().next()
+ .equalsIgnoreCase(mobFilesSet.iterator().next()));
+ }
+
- private void testCompactionDelaySweeperInternal(HTable table, String tableName)
++ private void testCompactionDelaySweeperInternal(Table table, BufferedMutator bufMut, String tableName)
+ throws Exception {
+ int count = 10;
+ //create table and generate 10 mob files
- generateMobTable(admin, table, tableName, count, 1);
++ generateMobTable(admin, bufMut, tableName, count, 1);
+ //get mob files
+ Path mobFamilyPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
+ FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ // mobFileSet0 stores the original mob files
+ TreeSet<String> mobFilesSet = new TreeSet<String>();
+ for (FileStatus status : fileStatuses) {
+ mobFilesSet.add(status.getPath().getName());
+ }
+
+ // scan the table, retrieve the references
+ Scan scan = new Scan();
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
+ ResultScanner rs = table.getScanner(scan);
+ TreeSet<String> mobFilesScanned = new TreeSet<String>();
+ for (Result res : rs) {
+ byte[] valueBytes = res.getValue(Bytes.toBytes(family),
+ Bytes.toBytes(column));
+ mobFilesScanned.add(Bytes.toString(valueBytes, Bytes.SIZEOF_INT,
+ valueBytes.length - Bytes.SIZEOF_INT));
+ }
+ //there should be 10 mob files
+ assertEquals(10, mobFilesScanned.size());
+ //check if we store the correct reference of mob files
+ assertEquals(mergeString(mobFilesSet), mergeString(mobFilesScanned));
+
+ Configuration conf = TEST_UTIL.getConfiguration();
+ conf.setLong(SweepJob.MOB_SWEEP_JOB_DELAY, 0);
+ String[] args = new String[2];
+ args[0] = tableName;
+ args[1] = family;
+ assertEquals(0, ToolRunner.run(conf, new Sweeper(), args));
+
+ mobFamilyPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
+ fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ mobFilesSet = new TreeSet<String>();
+ for (FileStatus status : fileStatuses) {
+ mobFilesSet.add(status.getPath().getName());
+ }
+ assertEquals(1, mobFilesSet.size());
+
+ scan = new Scan();
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(Boolean.TRUE));
+ rs = table.getScanner(scan);
+ TreeSet<String> mobFilesScannedAfterJob = new TreeSet<String>();
+ for (Result res : rs) {
+ byte[] valueBytes = res.getValue(Bytes.toBytes(family), Bytes.toBytes(
+ column));
+ mobFilesScannedAfterJob.add(Bytes.toString(valueBytes, Bytes.SIZEOF_INT,
+ valueBytes.length - Bytes.SIZEOF_INT));
+ }
+ assertEquals(1, mobFilesScannedAfterJob.size());
+
+ fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
+ mobFilesSet = new TreeSet<String>();
+ for (FileStatus status : fileStatuses) {
+ mobFilesSet.add(status.getPath().getName());
+ }
+ assertEquals(1, mobFilesSet.size());
+ assertEquals(true, mobFilesScannedAfterJob.iterator().next()
+ .equalsIgnoreCase(mobFilesSet.iterator().next()));
+ }
+
+ @Test
+ public void testCompactionDelaySweeper() throws Exception {
- testCompactionDelaySweeperInternal(table, tableName);
++ testCompactionDelaySweeperInternal(table, bufMut, tableName);
+ }
+
+ @Test
+ public void testCompactionDelaySweeperWithNamespace() throws Exception {
+ // create a table with namespace
+ NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create("ns").build();
+ admin.createNamespace(namespaceDescriptor);
+ String tableNameAsString = "ns:testSweeperWithNamespace";
+ TableName tableName = TableName.valueOf(tableNameAsString);
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
+ admin.createTable(desc);
- HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
- table.setAutoFlush(false, false);
- testCompactionDelaySweeperInternal(table, tableNameAsString);
++ Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
++ BufferedMutator bufMut = c.getBufferedMutator(tableName);
++ Table table = c.getTable(tableName);
++ testCompactionDelaySweeperInternal(table, bufMut, tableNameAsString);
+ table.close();
+ bufMut.close();
+ c.close();
+ admin.disableTable(tableName);
+ admin.deleteTable(tableName);
+ admin.deleteNamespace("ns");
+ }
+}
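
The raw scans above return reference cells rather than the mob data itself: each reference value is a 4-byte length prefix followed by the name of the backing mob file, which is why the tests decode it with Bytes.toString(value, Bytes.SIZEOF_INT, ...). A minimal standalone sketch of that decoding, assuming only the Bytes utility already used in the tests (the helper class name is illustrative):

import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper; mirrors the parsing done in the tests above.
public final class MobRefValues {
  private MobRefValues() {}

  // The name of the mob file the reference cell points at.
  public static String fileName(byte[] refValue) {
    return Bytes.toString(refValue, Bytes.SIZEOF_INT, refValue.length - Bytes.SIZEOF_INT);
  }

  // The length of the original value, stored in the 4-byte prefix.
  public static int valueLength(byte[] refValue) {
    return Bytes.toInt(refValue, 0, Bytes.SIZEOF_INT);
  }
}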
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
index 028e602,0000000..6dbcec0
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
@@@ -1,225 -1,0 +1,218 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.HBaseAdmin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Put;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestDeleteMobTable {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static byte[] FAMILY = Bytes.toBytes("family");
+ private final static byte[] QF = Bytes.toBytes("qualifier");
+ private static Random random = new Random();
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * Generate the mob value.
+ *
+ * @param size
+ * the size of the value
+ * @return the mob value generated
+ */
+ private static byte[] generateMobValue(int size) {
+ byte[] mobVal = new byte[size];
+ random.nextBytes(mobVal);
+ return mobVal;
+ }
+
+ @Test
+ public void testDeleteMobTable() throws Exception {
+ byte[] tableName = Bytes.toBytes("testDeleteMobTable");
+ TableName tn = TableName.valueOf(tableName);
+ HTableDescriptor htd = new HTableDescriptor(tn);
+ HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0);
+ htd.addFamily(hcd);
+ HBaseAdmin admin = null;
- HTable table = null;
++ Table table = null;
+ try {
- admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
++ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(htd);
- table = new HTable(TEST_UTIL.getConfiguration(), tableName);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn);
+ byte[] value = generateMobValue(10);
+
+ byte[] row = Bytes.toBytes("row");
+ Put put = new Put(row);
- put.add(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
++ put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
+ table.put(put);
+
- table.flushCommits();
- admin.flush(tableName);
++ admin.flush(tn);
+
+ // the mob file exists
+ Assert.assertEquals(1, countMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
+ String fileName = assertHasOneMobRow(table, tn, hcd.getNameAsString());
+ Assert.assertFalse(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
+ Assert.assertTrue(mobTableDirExist(tn));
+ table.close();
+
+ admin.disableTable(tn);
+ admin.deleteTable(tn);
+
+ Assert.assertFalse(admin.tableExists(tn));
+ Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertEquals(1, countArchiveMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertTrue(mobArchiveExist(tn, hcd.getNameAsString(), fileName));
+ Assert.assertFalse(mobTableDirExist(tn));
+ } finally {
+ if (admin != null) {
+ admin.close();
+ }
+ }
+ }
+
+ @Test
+ public void testDeleteNonMobTable() throws Exception {
+ byte[] tableName = Bytes.toBytes("testDeleteNonMobTable");
+ TableName tn = TableName.valueOf(tableName);
+ HTableDescriptor htd = new HTableDescriptor(tn);
+ HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+ htd.addFamily(hcd);
+ HBaseAdmin admin = null;
- HTable table = null;
++ Table table = null;
+ try {
- admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
++ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(htd);
- table = new HTable(TEST_UTIL.getConfiguration(), tableName);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn);
+ byte[] value = generateMobValue(10);
+
+ byte[] row = Bytes.toBytes("row");
+ Put put = new Put(row);
- put.add(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
++ put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value);
+ table.put(put);
+
- table.flushCommits();
- admin.flush(tableName);
++ admin.flush(tn);
+ table.close();
+
+ // the mob file doesn't exist
+ Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertFalse(mobTableDirExist(tn));
+
+ admin.disableTable(tn);
+ admin.deleteTable(tn);
+
+ Assert.assertFalse(admin.tableExists(tn));
+ Assert.assertEquals(0, countMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertEquals(0, countArchiveMobFiles(tn, hcd.getNameAsString()));
+ Assert.assertFalse(mobTableDirExist(tn));
+ } finally {
+ if (admin != null) {
+ admin.close();
+ }
+ }
+ }
+
+ private int countMobFiles(TableName tn, String familyName) throws IOException {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Path mobFileDir = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName);
+ if (fs.exists(mobFileDir)) {
+ return fs.listStatus(mobFileDir).length;
+ } else {
+ return 0;
+ }
+ }
+
+ private int countArchiveMobFiles(TableName tn, String familyName)
+ throws IOException {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn,
+ MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName);
+ if (fs.exists(storePath)) {
+ return fs.listStatus(storePath).length;
+ } else {
+ return 0;
+ }
+ }
+
+ private boolean mobTableDirExist(TableName tn) throws IOException {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Path tableDir = FSUtils.getTableDir(MobUtils.getMobHome(TEST_UTIL.getConfiguration()), tn);
+ return fs.exists(tableDir);
+ }
+
+ private boolean mobArchiveExist(TableName tn, String familyName, String fileName)
+ throws IOException {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn,
+ MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName);
+ return fs.exists(new Path(storePath, fileName));
+ }
+
- private String assertHasOneMobRow(HTable table, TableName tn, String familyName)
++ private String assertHasOneMobRow(Table table, TableName tn, String familyName)
+ throws IOException {
+ Scan scan = new Scan();
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ ResultScanner rs = table.getScanner(scan);
+ Result r = rs.next();
+ Assert.assertNotNull(r);
+ byte[] value = r.getValue(FAMILY, QF);
+ String fileName = Bytes.toString(value, Bytes.SIZEOF_INT, value.length - Bytes.SIZEOF_INT);
+ Path filePath = new Path(
+ MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName), fileName);
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ Assert.assertTrue(fs.exists(filePath));
+ r = rs.next();
+ Assert.assertNull(r);
+ return fileName;
+ }
+}
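
The hunks above migrate from the deprecated HTable constructor to the Connection-based client API; note that table = ConnectionFactory.createConnection(conf).getTable(tn) leaves the Connection itself unreferenced and unclosed. A minimal sketch, assuming the same 1.x client API, of the tidier try-with-resources form:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionLifecycle {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resources close in reverse order: the Table first, then the shared Connection.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testDeleteMobTable"))) {
      Put put = new Put(Bytes.toBytes("row"));
      put.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qualifier"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}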
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 3a0f9be,0000000..39fd410
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@@ -1,472 -1,0 +1,472 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.concurrent.ConcurrentSkipListSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.mockito.Mockito;
+
+@Category(MediumTests.class)
+public class TestHMobStore {
+ public static final Log LOG = LogFactory.getLog(TestHMobStore.class);
+ @Rule public TestName name = new TestName();
+
+ private HMobStore store;
+ private HRegion region;
+ private HColumnDescriptor hcd;
+ private FileSystem fs;
+ private byte [] table = Bytes.toBytes("table");
+ private byte [] family = Bytes.toBytes("family");
+ private byte [] row = Bytes.toBytes("row");
+ private byte [] row2 = Bytes.toBytes("row2");
+ private byte [] qf1 = Bytes.toBytes("qf1");
+ private byte [] qf2 = Bytes.toBytes("qf2");
+ private byte [] qf3 = Bytes.toBytes("qf3");
+ private byte [] qf4 = Bytes.toBytes("qf4");
+ private byte [] qf5 = Bytes.toBytes("qf5");
+ private byte [] qf6 = Bytes.toBytes("qf6");
+ private byte[] value = Bytes.toBytes("value");
+ private byte[] value2 = Bytes.toBytes("value2");
+ private Path mobFilePath;
+ private Date currentDate = new Date();
+ private KeyValue seekKey1;
+ private KeyValue seekKey2;
+ private KeyValue seekKey3;
+ private NavigableSet<byte[]> qualifiers =
+ new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
+ private List<Cell> expected = new ArrayList<Cell>();
+ private long id = System.currentTimeMillis();
+ private Get get = new Get(row);
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final String DIR = TEST_UTIL.getDataTestDir("TestHMobStore").toString();
+
+ /**
+ * Setup
+ * @throws Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ qualifiers.add(qf1);
+ qualifiers.add(qf3);
+ qualifiers.add(qf5);
+
+ Iterator<byte[]> iter = qualifiers.iterator();
+ while(iter.hasNext()){
+ byte [] next = iter.next();
+ expected.add(new KeyValue(row, family, next, 1, value));
+ get.addColumn(family, next);
+ get.setMaxVersions(); // all versions.
+ }
+ }
+
+ private void init(String methodName, Configuration conf, boolean testStore)
+ throws IOException {
+ hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(3L);
+ hcd.setMaxVersions(4);
+ init(methodName, conf, hcd, testStore);
+ }
+
+ private void init(String methodName, Configuration conf,
+ HColumnDescriptor hcd, boolean testStore) throws IOException {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
+ init(methodName, conf, htd, hcd, testStore);
+ }
+
+ private void init(String methodName, Configuration conf, HTableDescriptor htd,
+ HColumnDescriptor hcd, boolean testStore) throws IOException {
+ //Setting up the Region and Store
+ Path basedir = new Path(DIR+methodName);
+ Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
+ String logName = "logs";
+ Path logdir = new Path(basedir, logName);
+ FileSystem fs = FileSystem.get(conf);
+ fs.delete(logdir, true);
+
+ htd.addFamily(hcd);
+ HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
+
+ final Configuration walConf = new Configuration(conf);
+ FSUtils.setRootDir(walConf, basedir);
+ final WALFactory wals = new WALFactory(walConf, null, methodName);
+ region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf,
+ info, htd, null);
+ store = new HMobStore(region, hcd, conf);
+ if(testStore) {
+ init(conf, hcd);
+ }
+ }
+
+ private void init(Configuration conf, HColumnDescriptor hcd)
+ throws IOException {
+ Path basedir = FSUtils.getRootDir(conf);
+ fs = FileSystem.get(conf);
+ Path homePath = new Path(basedir, Bytes.toString(family) + Path.SEPARATOR
+ + Bytes.toString(family));
+ fs.mkdirs(homePath);
+
+ KeyValue key1 = new KeyValue(row, family, qf1, 1, value);
+ KeyValue key2 = new KeyValue(row, family, qf2, 1, value);
+ KeyValue key3 = new KeyValue(row2, family, qf3, 1, value2);
+ KeyValue[] keys = new KeyValue[] { key1, key2, key3 };
+ int maxKeyCount = keys.length;
+ StoreFile.Writer mobWriter = store.createWriterInTmp(currentDate, maxKeyCount,
- hcd.getCompactionCompression(), region.getStartKey());
++ hcd.getCompactionCompression(), region.getRegionInfo().getStartKey());
+ mobFilePath = mobWriter.getPath();
+
+ mobWriter.append(key1);
+ mobWriter.append(key2);
+ mobWriter.append(key3);
+ mobWriter.close();
+
+ String targetPathName = MobUtils.formatDate(currentDate);
+ byte[] referenceValue = Bytes.toBytes(targetPathName + Path.SEPARATOR + mobFilePath.getName());
+ Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName().getName());
+ KeyValue kv1 = new KeyValue(row, family, qf1, Long.MAX_VALUE, referenceValue);
+ KeyValue kv2 = new KeyValue(row, family, qf2, Long.MAX_VALUE, referenceValue);
+ KeyValue kv3 = new KeyValue(row2, family, qf3, Long.MAX_VALUE, referenceValue);
+ seekKey1 = MobUtils.createMobRefKeyValue(kv1, referenceValue, tableNameTag);
+ seekKey2 = MobUtils.createMobRefKeyValue(kv2, referenceValue, tableNameTag);
+ seekKey3 = MobUtils.createMobRefKeyValue(kv3, referenceValue, tableNameTag);
+ }
+
+ /**
+ * Getting data from memstore
+ * @throws IOException
+ */
+ @Test
+ public void testGetFromMemStore() throws IOException {
+ final Configuration conf = HBaseConfiguration.create();
+ init(name.getMethodName(), conf, false);
+
+ //Put data in memstore
+ this.store.add(new KeyValue(row, family, qf1, 1, value));
+ this.store.add(new KeyValue(row, family, qf2, 1, value));
+ this.store.add(new KeyValue(row, family, qf3, 1, value));
+ this.store.add(new KeyValue(row, family, qf4, 1, value));
+ this.store.add(new KeyValue(row, family, qf5, 1, value));
+ this.store.add(new KeyValue(row, family, qf6, 1, value));
+
+ Scan scan = new Scan(get);
+ InternalScanner scanner = (InternalScanner) store.getScanner(scan,
+ scan.getFamilyMap().get(store.getFamily().getName()),
+ 0);
+
+ List<Cell> results = new ArrayList<Cell>();
+ scanner.next(results);
+ Collections.sort(results, KeyValue.COMPARATOR);
+ scanner.close();
+
+ //Compare
+ Assert.assertEquals(expected.size(), results.size());
+ for(int i=0; i<results.size(); i++) {
+ // Verify the values
+ Assert.assertEquals(expected.get(i), results.get(i));
+ }
+ }
+
+ /**
+ * Getting MOB data from files
+ * @throws IOException
+ */
+ @Test
+ public void testGetFromFiles() throws IOException {
+ final Configuration conf = TEST_UTIL.getConfiguration();
+ init(name.getMethodName(), conf, false);
+
+ //Put data in memstore
+ this.store.add(new KeyValue(row, family, qf1, 1, value));
+ this.store.add(new KeyValue(row, family, qf2, 1, value));
+ //flush
+ flush(1);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf3, 1, value));
+ this.store.add(new KeyValue(row, family, qf4, 1, value));
+ //flush
+ flush(2);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf5, 1, value));
+ this.store.add(new KeyValue(row, family, qf6, 1, value));
+ //flush
+ flush(3);
+
+ Scan scan = new Scan(get);
+ InternalScanner scanner = (InternalScanner) store.getScanner(scan,
+ scan.getFamilyMap().get(store.getFamily().getName()),
+ 0);
+
+ List<Cell> results = new ArrayList<Cell>();
+ scanner.next(results);
+ Collections.sort(results, KeyValue.COMPARATOR);
+ scanner.close();
+
+ //Compare
+ Assert.assertEquals(expected.size(), results.size());
+ for(int i=0; i<results.size(); i++) {
+ Assert.assertEquals(expected.get(i), results.get(i));
+ }
+ }
+
+ /**
+ * Getting the reference data from files
+ * @throws IOException
+ */
+ @Test
+ public void testGetReferencesFromFiles() throws IOException {
+ final Configuration conf = HBaseConfiguration.create();
+ init(name.getMethodName(), conf, false);
+
+ //Put data in memstore
+ this.store.add(new KeyValue(row, family, qf1, 1, value));
+ this.store.add(new KeyValue(row, family, qf2, 1, value));
+ //flush
+ flush(1);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf3, 1, value));
+ this.store.add(new KeyValue(row, family, qf4, 1, value));
+ //flush
+ flush(2);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf5, 1, value));
+ this.store.add(new KeyValue(row, family, qf6, 1, value));
+ //flush
+ flush(3);
+
+ Scan scan = new Scan(get);
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ InternalScanner scanner = (InternalScanner) store.getScanner(scan,
+ scan.getFamilyMap().get(store.getFamily().getName()),
+ 0);
+
+ List<Cell> results = new ArrayList<Cell>();
+ scanner.next(results);
+ Collections.sort(results, KeyValue.COMPARATOR);
+ scanner.close();
+
+ //Compare
+ Assert.assertEquals(expected.size(), results.size());
+ for(int i=0; i<results.size(); i++) {
+ Cell cell = results.get(i);
+ Assert.assertTrue(MobUtils.isMobReferenceCell(cell));
+ }
+ }
+
+ /**
+ * Getting data from memstore and files
+ * @throws IOException
+ */
+ @Test
+ public void testGetFromMemStoreAndFiles() throws IOException {
+
+ final Configuration conf = HBaseConfiguration.create();
+
+ init(name.getMethodName(), conf, false);
+
+ //Put data in memstore
+ this.store.add(new KeyValue(row, family, qf1, 1, value));
+ this.store.add(new KeyValue(row, family, qf2, 1, value));
+ //flush
+ flush(1);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf3, 1, value));
+ this.store.add(new KeyValue(row, family, qf4, 1, value));
+ //flush
+ flush(2);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf5, 1, value));
+ this.store.add(new KeyValue(row, family, qf6, 1, value));
+
+ Scan scan = new Scan(get);
+ InternalScanner scanner = (InternalScanner) store.getScanner(scan,
+ scan.getFamilyMap().get(store.getFamily().getName()),
+ 0);
+
+ List<Cell> results = new ArrayList<Cell>();
+ scanner.next(results);
+ Collections.sort(results, KeyValue.COMPARATOR);
+ scanner.close();
+
+ //Compare
+ Assert.assertEquals(expected.size(), results.size());
+ for(int i=0; i<results.size(); i++) {
+ Assert.assertEquals(expected.get(i), results.get(i));
+ }
+ }
+
+ /**
+ * Getting data from memstore and files
+ * @throws IOException
+ */
+ @Test
+ public void testMobCellSizeThreshold() throws IOException {
+
+ final Configuration conf = HBaseConfiguration.create();
+
+ HColumnDescriptor hcd;
+ hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(100);
+ hcd.setMaxVersions(4);
+ init(name.getMethodName(), conf, hcd, false);
+
+ //Put data in memstore
+ this.store.add(new KeyValue(row, family, qf1, 1, value));
+ this.store.add(new KeyValue(row, family, qf2, 1, value));
+ //flush
+ flush(1);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf3, 1, value));
+ this.store.add(new KeyValue(row, family, qf4, 1, value));
+ //flush
+ flush(2);
+
+ //Add more data
+ this.store.add(new KeyValue(row, family, qf5, 1, value));
+ this.store.add(new KeyValue(row, family, qf6, 1, value));
+ //flush
+ flush(3);
+
+ Scan scan = new Scan(get);
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ InternalScanner scanner = (InternalScanner) store.getScanner(scan,
+ scan.getFamilyMap().get(store.getFamily().getName()),
+ 0);
+
+ List<Cell> results = new ArrayList<Cell>();
+ scanner.next(results);
+ Collections.sort(results, KeyValue.COMPARATOR);
+ scanner.close();
+
+ //Compare
+ Assert.assertEquals(expected.size(), results.size());
+ for(int i=0; i<results.size(); i++) {
+ Cell cell = results.get(i);
+ //this is not mob reference cell.
+ Assert.assertFalse(MobUtils.isMobReferenceCell(cell));
+ Assert.assertEquals(expected.get(i), results.get(i));
+ Assert.assertEquals(100, store.getFamily().getMobThreshold());
+ }
+ }
+
+ @Test
+ public void testCommitFile() throws Exception {
+ final Configuration conf = HBaseConfiguration.create();
+ init(name.getMethodName(), conf, true);
+ String targetPathName = MobUtils.formatDate(new Date());
+ Path targetPath = new Path(store.getPath(), (targetPathName
+ + Path.SEPARATOR + mobFilePath.getName()));
+ fs.delete(targetPath, true);
+ Assert.assertFalse(fs.exists(targetPath));
+ //commit file
+ store.commitFile(mobFilePath, targetPath);
+ Assert.assertTrue(fs.exists(targetPath));
+ }
+
+ @Test
+ public void testResolve() throws Exception {
+ final Configuration conf = HBaseConfiguration.create();
+ init(name.getMethodName(), conf, true);
+ String targetPathName = MobUtils.formatDate(currentDate);
+ Path targetPath = new Path(store.getPath(), targetPathName);
+ store.commitFile(mobFilePath, targetPath);
+ //resolve
+ Cell resultCell1 = store.resolve(seekKey1, false);
+ Cell resultCell2 = store.resolve(seekKey2, false);
+ Cell resultCell3 = store.resolve(seekKey3, false);
+ //compare
+ Assert.assertEquals(Bytes.toString(value),
+ Bytes.toString(CellUtil.cloneValue(resultCell1)));
+ Assert.assertEquals(Bytes.toString(value),
+ Bytes.toString(CellUtil.cloneValue(resultCell2)));
+ Assert.assertEquals(Bytes.toString(value2),
+ Bytes.toString(CellUtil.cloneValue(resultCell3)));
+ }
+
+ /**
+ * Flush the memstore
+ * @param storeFilesSize
+ * @throws IOException
+ */
+ private void flush(int storeFilesSize) throws IOException{
+ this.store.snapshot();
+ flushStore(store, id++);
+ Assert.assertEquals(storeFilesSize, this.store.getStorefiles().size());
+ Assert.assertEquals(0, ((DefaultMemStore)this.store.memstore).cellSet.size());
+ }
+
+ /**
+ * Flush the memstore
+ * @param store
+ * @param id
+ * @throws IOException
+ */
+ private static void flushStore(HMobStore store, long id) throws IOException {
+ StoreFlushContext storeFlushCtx = store.createFlushContext(id);
+ storeFlushCtx.prepare();
+ storeFlushCtx.flushCache(Mockito.mock(MonitoredTask.class));
+ storeFlushCtx.commit(Mockito.mock(MonitoredTask.class));
+ }
+}
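
Several tests above toggle the same pair of behaviors: setting MobConstants.MOB_SCAN_RAW on a Scan so reference cells come back unresolved, then classifying each returned cell with MobUtils.isMobReferenceCell. A small sketch of those two steps in isolation, assuming the MobConstants/MobUtils API visible in this patch (the class name is illustrative):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper around the raw-scan idiom used throughout these tests.
public final class RawMobScans {
  private RawMobScans() {}

  // Ask the server to return mob reference cells instead of resolving values.
  public static Scan raw(Scan scan) {
    scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
    return scan;
  }

  // True if the cell points into a mob file rather than carrying the value.
  public static boolean isReference(Cell cell) {
    return MobUtils.isMobReferenceCell(cell);
  }
}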
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java
index d429de5,0000000..005bdfe
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java
@@@ -1,467 -1,0 +1,466 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
- import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test mob compaction
+ */
+@Category(MediumTests.class)
+public class TestMobCompaction {
+ @Rule
+ public TestName name = new TestName();
+ static final Log LOG = LogFactory.getLog(TestMobCompaction.class.getName());
+ private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private Configuration conf = null;
+
+ private HRegion region = null;
+ private HTableDescriptor htd = null;
+ private HColumnDescriptor hcd = null;
+ private long mobCellThreshold = 1000;
+
+ private FileSystem fs;
+
+ private static final byte[] COLUMN_FAMILY = fam1;
+ private final byte[] STARTROW = Bytes.toBytes(START_KEY);
+ private int compactionThreshold;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ private void init(Configuration conf, long mobThreshold) throws Exception {
+ this.conf = conf;
+ this.mobCellThreshold = mobThreshold;
+ HBaseTestingUtility UTIL = new HBaseTestingUtility(conf);
+
+ compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
+ htd = UTIL.createTableDescriptor(name.getMethodName());
+ hcd = new HColumnDescriptor(COLUMN_FAMILY);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(mobThreshold);
+ hcd.setMaxVersions(1);
+ htd.modifyFamily(hcd);
+
+ region = UTIL.createLocalHRegion(htd, null, null);
+ fs = FileSystem.get(conf);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ region.close();
+ fs.delete(UTIL.getDataTestDir(), true);
+ }
+
+ /**
+ * During compaction, cells smaller than the threshold won't be affected.
+ */
+ @Test
+ public void testSmallerValue() throws Exception {
+ init(UTIL.getConfiguration(), 500);
+ byte[] dummyData = makeDummyData(300); // smaller than mob threshold
+ HRegionIncommon loader = new HRegionIncommon(region);
+ // one hfile per row
+ for (int i = 0; i < compactionThreshold; i++) {
+ Put p = createPut(i, dummyData);
+ loader.put(p);
+ loader.flushcache();
+ }
+ assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
+ assertEquals("Before compaction: mob file count", 0, countMobFiles());
+ assertEquals("Before compaction: rows", compactionThreshold, countRows());
+ assertEquals("Before compaction: mob rows", 0, countMobRows());
+
+ region.compactStores();
+
+ assertEquals("After compaction: store files", 1, countStoreFiles());
+ assertEquals("After compaction: mob file count", 0, countMobFiles());
+ assertEquals("After compaction: referenced mob file count", 0, countReferencedMobFiles());
+ assertEquals("After compaction: rows", compactionThreshold, countRows());
+ assertEquals("After compaction: mob rows", 0, countMobRows());
+ }
+
+ /**
+ * During compaction, the mob threshold size is changed.
+ */
+ @Test
+ public void testLargerValue() throws Exception {
+ init(UTIL.getConfiguration(), 200);
+ byte[] dummyData = makeDummyData(300); // larger than mob threshold
+ HRegionIncommon loader = new HRegionIncommon(region);
+ for (int i = 0; i < compactionThreshold; i++) {
+ Put p = createPut(i, dummyData);
+ loader.put(p);
+ loader.flushcache();
+ }
+ assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
+ assertEquals("Before compaction: mob file count", compactionThreshold, countMobFiles());
+ assertEquals("Before compaction: rows", compactionThreshold, countRows());
+ assertEquals("Before compaction: mob rows", compactionThreshold, countMobRows());
+ assertEquals("Before compaction: number of mob cells", compactionThreshold,
+ countMobCellsInMetadata());
+ // Change the threshold larger than the data size
+ region.getTableDesc().getFamily(COLUMN_FAMILY).setMobThreshold(500);
+ region.initialize();
+ region.compactStores();
+
+ assertEquals("After compaction: store files", 1, countStoreFiles());
+ assertEquals("After compaction: mob file count", compactionThreshold, countMobFiles());
+ assertEquals("After compaction: referenced mob file count", 0, countReferencedMobFiles());
+ assertEquals("After compaction: rows", compactionThreshold, countRows());
+ assertEquals("After compaction: mob rows", 0, countMobRows());
+ }
+
+ /**
+ * This test first generates store files, then bulk loads them and triggers a compaction
+ * during which the cell values are larger than the threshold.
+ */
+ @Test
+ public void testMobCompactionWithBulkload() throws Exception {
+ // The following will produce store files holding 600-byte values.
+ init(UTIL.getConfiguration(), 300);
+ byte[] dummyData = makeDummyData(600);
+
+ Path hbaseRootDir = FSUtils.getRootDir(conf);
+ Path basedir = new Path(hbaseRootDir, htd.getNameAsString());
- List<Pair<byte[], String>> hfiles = new ArrayList<Pair<byte[], String>>(1);
++ List<Pair<byte[], String>> hfiles = new ArrayList<>(1);
+ for (int i = 0; i < compactionThreshold; i++) {
+ Path hpath = new Path(basedir, "hfile" + i);
+ hfiles.add(Pair.newPair(COLUMN_FAMILY, hpath.toString()));
+ createHFile(hpath, i, dummyData);
+ }
+
+ // The following will bulk load the above generated store files and compact,
+ // with 600 (value size) > 300 (threshold)
- boolean result = region.bulkLoadHFiles(hfiles, true);
++ boolean result = region.bulkLoadHFiles(hfiles, true, null);
+ assertTrue("Bulkload result:", result);
+ assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
+ assertEquals("Before compaction: mob file count", 0, countMobFiles());
+ assertEquals("Before compaction: rows", compactionThreshold, countRows());
+ assertEquals("Before compaction: mob rows", 0, countMobRows());
+ assertEquals("Before compaction: referenced mob file count", 0, countReferencedMobFiles());
+
+ region.compactStores();
+
+ assertEquals("After compaction: store files", 1, countStoreFiles());
+ assertEquals("After compaction: mob file count:", 1, countMobFiles());
+ assertEquals("After compaction: rows", compactionThreshold, countRows());
+ assertEquals("After compaction: mob rows", compactionThreshold, countMobRows());
+ assertEquals("After compaction: referenced mob file count", 1, countReferencedMobFiles());
+ assertEquals("After compaction: number of mob cells", compactionThreshold,
+ countMobCellsInMetadata());
+ }
+
+ @Test
+ public void testMajorCompactionAfterDelete() throws Exception {
+ init(UTIL.getConfiguration(), 100);
+ byte[] dummyData = makeDummyData(200); // larger than mob threshold
+ HRegionIncommon loader = new HRegionIncommon(region);
+ // create hfiles and mob hfiles but don't trigger compaction
+ int numHfiles = compactionThreshold - 1;
+ byte[] deleteRow = Bytes.add(STARTROW, Bytes.toBytes(0));
+ for (int i = 0; i < numHfiles; i++) {
+ Put p = createPut(i, dummyData);
+ loader.put(p);
+ loader.flushcache();
+ }
+ assertEquals("Before compaction: store files", numHfiles, countStoreFiles());
+ assertEquals("Before compaction: mob file count", numHfiles, countMobFiles());
+ assertEquals("Before compaction: rows", numHfiles, countRows());
+ assertEquals("Before compaction: mob rows", numHfiles, countMobRows());
+ assertEquals("Before compaction: number of mob cells", numHfiles, countMobCellsInMetadata());
+ // now let's delete some cells that contain mobs
+ Delete delete = new Delete(deleteRow);
- delete.deleteFamily(COLUMN_FAMILY);
++ delete.addFamily(COLUMN_FAMILY);
+ region.delete(delete);
+ loader.flushcache();
+
+ assertEquals("Before compaction: store files", numHfiles + 1, countStoreFiles());
+ assertEquals("Before compaction: mob files", numHfiles, countMobFiles());
- region.compactStores(true);
++ region.compact(true);
+ assertEquals("After compaction: store files", 1, countStoreFiles());
+ // still have original mob hfiles and now added a mob del file
+ assertEquals("After compaction: mob files", numHfiles + 1, countMobFiles());
+
+ Scan scan = new Scan();
+ scan.setRaw(true);
+ InternalScanner scanner = region.getScanner(scan);
- List<Cell> results = new ArrayList<Cell>();
++ List<Cell> results = new ArrayList<>();
+ scanner.next(results);
+ int deleteCount = 0;
+ while (!results.isEmpty()) {
+ for (Cell c : results) {
+ if (c.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()) {
+ deleteCount++;
+ assertTrue(Bytes.equals(CellUtil.cloneRow(c), deleteRow));
+ }
+ }
+ results.clear();
+ scanner.next(results);
+ }
+ // assert the delete mark is not retained after the major compaction
+ assertEquals(0, deleteCount);
+ scanner.close();
+ // assert the deleted cell is not counted
+ assertEquals("The cells in mob files", numHfiles - 1, countMobCellsInMobFiles(1));
+ }
+
+ private int countStoreFiles() throws IOException {
+ Store store = region.getStore(COLUMN_FAMILY);
+ return store.getStorefilesCount();
+ }
+
+ private int countMobFiles() throws IOException {
+ Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()),
+ hcd.getNameAsString());
+ if (fs.exists(mobDirPath)) {
+ FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
+ return files.length;
+ }
+ return 0;
+ }
+
+ private long countMobCellsInMetadata() throws IOException {
+ long mobCellsCount = 0;
+ Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()),
+ hcd.getNameAsString());
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
+ CacheConfig cacheConfig = new CacheConfig(copyOfConf);
+ if (fs.exists(mobDirPath)) {
+ FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
+ for (FileStatus file : files) {
+ StoreFile sf = new StoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE);
+ Map<byte[], byte[]> fileInfo = sf.createReader().loadFileInfo();
+ byte[] count = fileInfo.get(StoreFile.MOB_CELLS_COUNT);
+ assertTrue(count != null);
+ mobCellsCount += Bytes.toLong(count);
+ }
+ }
+ return mobCellsCount;
+ }
+
+ private Put createPut(int rowIdx, byte[] dummyData) throws IOException {
+ Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)));
+ p.setDurability(Durability.SKIP_WAL);
- p.add(COLUMN_FAMILY, Bytes.toBytes("colX"), dummyData);
++ p.addColumn(COLUMN_FAMILY, Bytes.toBytes("colX"), dummyData);
+ return p;
+ }
+
+ /**
+ * Create an HFile with the given number of bytes
+ */
+ private void createHFile(Path path, int rowIdx, byte[] dummyData) throws IOException {
+ HFileContext meta = new HFileContextBuilder().build();
+ HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
+ .withFileContext(meta).create();
+ long now = System.currentTimeMillis();
+ try {
+ KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY,
+ Bytes.toBytes("colX"), now, dummyData);
+ writer.append(kv);
+ } finally {
+ writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
+ writer.close();
+ }
+ }
+
+ private int countMobRows() throws IOException {
+ Scan scan = new Scan();
+ // Do not retrieve the mob data when scanning
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ InternalScanner scanner = region.getScanner(scan);
+
+ int scannedCount = 0;
- List<Cell> results = new ArrayList<Cell>();
++ List<Cell> results = new ArrayList<>();
+ boolean hasMore = true;
+ while (hasMore) {
+ hasMore = scanner.next(results);
+ for (Cell c : results) {
+ if (MobUtils.isMobReferenceCell(c)) {
+ scannedCount++;
+ }
+ }
+ results.clear();
+ }
+ scanner.close();
+
+ return scannedCount;
+ }
+
+ private int countRows() throws IOException {
+ Scan scan = new Scan();
+ // A plain scan that resolves mob data and counts every row
+ InternalScanner scanner = region.getScanner(scan);
+
+ int scannedCount = 0;
+ List<Cell> results = new ArrayList<Cell>();
+ boolean hasMore = true;
+ while (hasMore) {
+ hasMore = scanner.next(results);
+ scannedCount += results.size();
+ results.clear();
+ }
+ scanner.close();
+
+ return scannedCount;
+ }
+
+ private byte[] makeDummyData(int size) {
+ byte[] dummyData = new byte[size];
+ new Random().nextBytes(dummyData);
+ return dummyData;
+ }
+
+ private int countReferencedMobFiles() throws IOException {
+ Scan scan = new Scan();
+ // Do not retrieve the mob data when scanning
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ InternalScanner scanner = region.getScanner(scan);
+
- List<Cell> kvs = new ArrayList<Cell>();
++ List<Cell> kvs = new ArrayList<>();
+ boolean hasMore = true;
+ String fileName;
- Set<String> files = new HashSet<String>();
++ Set<String> files = new HashSet<>();
+ do {
+ kvs.clear();
+ hasMore = scanner.next(kvs);
- for (Cell c : kvs) {
- KeyValue kv = KeyValueUtil.ensureKeyValue(c);
++ for (Cell kv : kvs) {
+ if (!MobUtils.isMobReferenceCell(kv)) {
+ continue;
+ }
+ if (!MobUtils.hasValidMobRefCellValue(kv)) {
+ continue;
+ }
+ int size = MobUtils.getMobValueLength(kv);
+ if (size <= mobCellThreshold) {
+ continue;
+ }
+ fileName = MobUtils.getMobFileName(kv);
+ if (fileName.isEmpty()) {
+ continue;
+ }
+ files.add(fileName);
+ Path familyPath = MobUtils.getMobFamilyPath(conf, htd.getTableName(),
+ hcd.getNameAsString());
+ assertTrue(fs.exists(new Path(familyPath, fileName)));
+ }
+ } while (hasMore);
+
+ scanner.close();
+
+ return files.size();
+ }
+
+ private int countMobCellsInMobFiles(int expectedNumDelfiles) throws IOException {
+ Configuration copyOfConf = new Configuration(conf);
+ copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
+ CacheConfig cacheConfig = new CacheConfig(copyOfConf);
+ Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()),
+ hcd.getNameAsString());
- List<StoreFile> sfs = new ArrayList<StoreFile>();
++ List<StoreFile> sfs = new ArrayList<>();
+ int numDelfiles = 0;
+ int size = 0;
+ if (fs.exists(mobDirPath)) {
+ for (FileStatus f : fs.listStatus(mobDirPath)) {
+ StoreFile sf = new StoreFile(fs, f.getPath(), conf, cacheConfig, BloomType.NONE);
+ sfs.add(sf);
+ if (StoreFileInfo.isDelFile(sf.getPath())) {
+ numDelfiles++;
+ }
+ }
+ List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, null,
+ HConstants.LATEST_TIMESTAMP);
+ Scan scan = new Scan();
+ scan.setMaxVersions(hcd.getMaxVersions());
+ long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
+ long ttl = HStore.determineTTLFromFamily(hcd);
+ ScanInfo scanInfo = new ScanInfo(hcd, ttl, timeToPurgeDeletes, KeyValue.COMPARATOR);
+ StoreScanner scanner = new StoreScanner(scan, scanInfo, ScanType.COMPACT_DROP_DELETES, null,
+ scanners, 0L, HConstants.LATEST_TIMESTAMP);
+ List<Cell> results = new ArrayList<>();
+ boolean hasMore = true;
+ while (hasMore) {
+ hasMore = scanner.next(results);
+ size += results.size();
+ results.clear();
+ }
+ }
+ // assert the number of the existing del files
+ assertEquals(expectedNumDelfiles, numDelfiles);
+ return size;
+ }
+}
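
Beyond the MOB changes, the hunks above also move client writes off the deprecated mutation methods: Put.add becomes Put.addColumn and Delete.deleteFamily becomes Delete.addFamily. A minimal sketch of the replacement calls, assuming the same client API versions as the patch:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public final class MutationApiMigration {
  public static void main(String[] args) {
    byte[] family = Bytes.toBytes("family");
    // Put.addColumn(family, qualifier, value) replaces the deprecated Put.add(...).
    Put put = new Put(Bytes.toBytes("row"));
    put.addColumn(family, Bytes.toBytes("colX"), Bytes.toBytes("value"));
    // Delete.addFamily(family) replaces the deprecated Delete.deleteFamily(...).
    Delete delete = new Delete(Bytes.toBytes("row"));
    delete.addFamily(family);
  }
}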
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
index 27a0b06,0000000..3b5a474
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java
@@@ -1,410 -1,0 +1,411 @@@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestMobStoreScanner {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final static byte [] row1 = Bytes.toBytes("row1");
+ private final static byte [] family = Bytes.toBytes("family");
+ private final static byte [] qf1 = Bytes.toBytes("qualifier1");
+ private final static byte [] qf2 = Bytes.toBytes("qualifier2");
+ protected final byte[] qf3 = Bytes.toBytes("qualifier3");
- private static HTable table;
++ private static Table table;
+ private static HBaseAdmin admin;
+ private static HColumnDescriptor hcd;
+ private static HTableDescriptor desc;
+ private static Random random = new Random();
+ private static long defaultThreshold = 10;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0);
+ TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true);
+ TEST_UTIL.getConfiguration().setInt("hbase.client.keyvalue.maxsize", 100*1024*1024);
+
+ TEST_UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ public void setUp(long threshold, String TN) throws Exception {
+ desc = new HTableDescriptor(TableName.valueOf(TN));
+ hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(threshold);
+ hcd.setMaxVersions(4);
+ desc.addFamily(hcd);
- admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
++ admin = TEST_UTIL.getHBaseAdmin();
+ admin.createTable(desc);
- table = new HTable(TEST_UTIL.getConfiguration(), TN);
++ table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
++ .getTable(TableName.valueOf(TN));
+ }
+
+ /**
+ * Generate the mob value.
+ *
+ * @param size the size of the value
+ * @return the mob value generated
+ */
+ private static byte[] generateMobValue(int size) {
+ byte[] mobVal = new byte[size];
+ random.nextBytes(mobVal);
+ return mobVal;
+ }
+
+ /**
+ * Set the scan attribute
+ *
+ * @param reversed if true, the scan is performed in reverse order
+ * @param mobScanRaw if true, the scan returns the mob reference instead of the value
+ */
+ public void setScan(Scan scan, boolean reversed, boolean mobScanRaw) {
+ scan.setReversed(reversed);
+ scan.setMaxVersions(4);
+ if(mobScanRaw) {
+ scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
+ }
+ }
+
+ @Test
+ public void testMobStoreScanner() throws Exception {
+ testGetFromFiles(false);
+ testGetFromMemStore(false);
+ testGetReferences(false);
+ testMobThreshold(false);
+ testGetFromArchive(false);
+ }
+
+ @Test
+ public void testReversedMobStoreScanner() throws Exception {
+ testGetFromFiles(true);
+ testGetFromMemStore(true);
+ testGetReferences(true);
+ testMobThreshold(true);
+ testGetFromArchive(true);
+ }
+
+ @Test(timeout=60000)
+ public void testGetMassive() throws Exception {
+ String TN = "testGetMassive";
+ setUp(defaultThreshold, TN);
+
+ // Put some data. Values of 5, 10, 15 and 20 MB are ok (right below the protobuf
+ // default max size of 64MB); values of 25, 30 and 40 MB fail, as they exceed it.
+ byte[] bigValue = new byte[25*1024*1024];
+
+ Put put = new Put(row1);
- put.add(family, qf1, bigValue);
- put.add(family, qf2, bigValue);
- put.add(family, qf3, bigValue);
++ put.addColumn(family, qf1, bigValue);
++ put.addColumn(family, qf2, bigValue);
++ put.addColumn(family, qf3, bigValue);
+ table.put(put);
+
+ Get g = new Get(row1);
+ Result r = table.get(g);
+ // should not have blown up.
+ }
+
+ public void testGetFromFiles(boolean reversed) throws Exception {
+ String TN = "testGetFromFiles" + reversed;
++ TableName tn = TableName.valueOf(TN);
+ setUp(defaultThreshold, TN);
+ long ts1 = System.currentTimeMillis();
+ long ts2 = ts1 + 1;
+ long ts3 = ts1 + 2;
+ byte [] value = generateMobValue((int)defaultThreshold+1);
+
+ Put put1 = new Put(row1);
- put1.add(family, qf1, ts3, value);
- put1.add(family, qf2, ts2, value);
- put1.add(family, qf3, ts1, value);
++ put1.addColumn(family, qf1, ts3, value);
++ put1.addColumn(family, qf2, ts2, value);
++ put1.addColumn(family, qf3, ts1, value);
+ table.put(put1);
+
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(tn);
+
+ Scan scan = new Scan();
+ setScan(scan, reversed, false);
+
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ Assert.assertEquals(Bytes.toString(value),
+ Bytes.toString(CellUtil.cloneValue(cell)));
+ count++;
+ }
+ }
+ results.close();
+ Assert.assertEquals(3, count);
+ }
+
+ public void testGetFromMemStore(boolean reversed) throws Exception {
+ String TN = "testGetFromMemStore" + reversed;
+ setUp(defaultThreshold, TN);
+ long ts1 = System.currentTimeMillis();
+ long ts2 = ts1 + 1;
+ long ts3 = ts1 + 2;
+ byte [] value = generateMobValue((int)defaultThreshold+1);
+
+ Put put1 = new Put(row1);
- put1.add(family, qf1, ts3, value);
- put1.add(family, qf2, ts2, value);
- put1.add(family, qf3, ts1, value);
++ put1.addColumn(family, qf1, ts3, value);
++ put1.addColumn(family, qf2, ts2, value);
++ put1.addColumn(family, qf3, ts1, value);
+ table.put(put1);
+
+ Scan scan = new Scan();
+ setScan(scan, reversed, false);
+
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ Assert.assertEquals(Bytes.toString(value),
+ Bytes.toString(CellUtil.cloneValue(cell)));
+ count++;
+ }
+ }
+ results.close();
+ Assert.assertEquals(3, count);
+ }
+
+ public void testGetReferences(boolean reversed) throws Exception {
+ String TN = "testGetReferences" + reversed;
++ TableName tn = TableName.valueOf(TN);
+ setUp(defaultThreshold, TN);
+ long ts1 = System.currentTimeMillis();
+ long ts2 = ts1 + 1;
+ long ts3 = ts1 + 2;
+ byte [] value = generateMobValue((int)defaultThreshold+1);
+
+ Put put1 = new Put(row1);
- put1.add(family, qf1, ts3, value);
- put1.add(family, qf2, ts2, value);
- put1.add(family, qf3, ts1, value);
++ put1.addColumn(family, qf1, ts3, value);
++ put1.addColumn(family, qf2, ts2, value);
++ put1.addColumn(family, qf3, ts1, value);
+ table.put(put1);
+
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(tn);
+
+ Scan scan = new Scan();
+ setScan(scan, reversed, true);
+
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ assertIsMobReference(cell, row1, family, value, TN);
+ count++;
+ }
+ }
+ results.close();
+ Assert.assertEquals(3, count);
+ }
+
+ public void testMobThreshold(boolean reversed) throws Exception {
+ String TN = "testMobThreshold" + reversed;
++ TableName tn = TableName.valueOf(TN);
+ setUp(defaultThreshold, TN);
+ byte [] valueLess = generateMobValue((int)defaultThreshold-1);
+ byte [] valueEqual = generateMobValue((int)defaultThreshold);
+ byte [] valueGreater = generateMobValue((int)defaultThreshold+1);
+ long ts1 = System.currentTimeMillis();
+ long ts2 = ts1 + 1;
+ long ts3 = ts1 + 2;
+
+ Put put1 = new Put(row1);
- put1.add(family, qf1, ts3, valueLess);
- put1.add(family, qf2, ts2, valueEqual);
- put1.add(family, qf3, ts1, valueGreater);
++ put1.addColumn(family, qf1, ts3, valueLess);
++ put1.addColumn(family, qf2, ts2, valueEqual);
++ put1.addColumn(family, qf3, ts1, valueGreater);
+ table.put(put1);
+
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(tn);
+
+ Scan scan = new Scan();
+ setScan(scan, reversed, true);
+
+ Cell cellLess = null;
+ Cell cellEqual = null;
+ Cell cellGreater = null;
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ String qf = Bytes.toString(CellUtil.cloneQualifier(cell));
+ if(qf.equals(Bytes.toString(qf1))) {
+ cellLess = cell;
+ }
+ if(qf.equals(Bytes.toString(qf2))) {
+ cellEqual = cell;
+ }
+ if(qf.equals(Bytes.toString(qf3))) {
+ cellGreater = cell;
+ }
+ count++;
+ }
+ }
+ Assert.assertEquals(3, count);
+ assertNotMobReference(cellLess, row1, family, valueLess);
+ assertNotMobReference(cellEqual, row1, family, valueEqual);
+ assertIsMobReference(cellGreater, row1, family, valueGreater, TN);
+ results.close();
+ }
+
+ public void testGetFromArchive(boolean reversed) throws Exception {
+ String TN = "testGetFromArchive" + reversed;
++ TableName tn = TableName.valueOf(TN);
+ setUp(defaultThreshold, TN);
+ long ts1 = System.currentTimeMillis();
+ long ts2 = ts1 + 1;
+ long ts3 = ts1 + 2;
+ byte [] value = generateMobValue((int)defaultThreshold+1);
+ // Put some data
+ Put put1 = new Put(row1);
- put1.add(family, qf1, ts3, value);
- put1.add(family, qf2, ts2, value);
- put1.add(family, qf3, ts1, value);
++ put1.addColumn(family, qf1, ts3, value);
++ put1.addColumn(family, qf2, ts2, value);
++ put1.addColumn(family, qf3, ts1, value);
+ table.put(put1);
+
- table.flushCommits();
- admin.flush(TN);
++ admin.flush(tn);
+
+ // Get the files in the mob path
+ Path mobFamilyPath;
+ mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(),
+ TableName.valueOf(TN)), hcd.getNameAsString());
+ FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+ FileStatus[] files = fs.listStatus(mobFamilyPath);
+
+ // Get the archive path
+ Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
+ Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf(TN));
+ HRegionInfo regionInfo = MobUtils.getMobRegionInfo(TableName.valueOf(TN));
+ Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(),
+ regionInfo, tableDir, family);
+
+ // Move the files from mob path to archive path
+ fs.mkdirs(storeArchiveDir);
+ int fileCount = 0;
+ for(FileStatus file : files) {
+ fileCount++;
+ Path filePath = file.getPath();
+ Path src = new Path(mobFamilyPath, filePath.getName());
+ Path dst = new Path(storeArchiveDir, filePath.getName());
+ fs.rename(src, dst);
+ }
+
+ // Verify the moving success
+ FileStatus[] files1 = fs.listStatus(mobFamilyPath);
+ Assert.assertEquals(0, files1.length);
+ FileStatus[] files2 = fs.listStatus(storeArchiveDir);
+ Assert.assertEquals(fileCount, files2.length);
+
+ // Scan from archive
+ Scan scan = new Scan();
+ setScan(scan, reversed, false);
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ List<Cell> cells = res.listCells();
+ for(Cell cell : cells) {
+ // Verify the value
+ Assert.assertEquals(Bytes.toString(value),
+ Bytes.toString(CellUtil.cloneValue(cell)));
+ count++;
+ }
+ }
+ results.close();
+ Assert.assertEquals(3, count);
+ }
+
+ /**
+ * Assert the value is not stored in mob.
+ */
+ private static void assertNotMobReference(Cell cell, byte[] row, byte[] family,
+ byte[] value) throws IOException {
+ Assert.assertEquals(Bytes.toString(row),
+ Bytes.toString(CellUtil.cloneRow(cell)));
+ Assert.assertEquals(Bytes.toString(family),
+ Bytes.toString(CellUtil.cloneFamily(cell)));
+ Assert.assertTrue(Bytes.toString(value).equals(
+ Bytes.toString(CellUtil.cloneValue(cell))));
+ }
+
+ /**
+ * Assert the value is stored in mob.
+ */
+ private static void assertIsMobReference(Cell cell, byte[] row, byte[] family,
+ byte[] value, String TN) throws IOException {
+ Assert.assertEquals(Bytes.toString(row),
+ Bytes.toString(CellUtil.cloneRow(cell)));
+ Assert.assertEquals(Bytes.toString(family),
+ Bytes.toString(CellUtil.cloneFamily(cell)));
+ Assert.assertFalse(Bytes.toString(value).equals(
+ Bytes.toString(CellUtil.cloneValue(cell))));
+ byte[] referenceValue = CellUtil.cloneValue(cell);
+ String fileName = Bytes.toString(referenceValue, Bytes.SIZEOF_INT,
+ referenceValue.length - Bytes.SIZEOF_INT);
+ int valLen = Bytes.toInt(referenceValue, 0, Bytes.SIZEOF_INT);
+ Assert.assertEquals(value.length, valLen);
+ Path mobFamilyPath;
+ mobFamilyPath = new Path(MobUtils.getMobRegionPath(TEST_UTIL.getConfiguration(),
+ TableName.valueOf(TN)), hcd.getNameAsString());
+ Path targetPath = new Path(mobFamilyPath, fileName);
+ FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+ Assert.assertTrue(fs.exists(targetPath));
+ }
+}
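For illustration, the MOB reference layout that assertIsMobReference checks above (a 4-byte value length followed by the MOB file name) can be decoded with a minimal sketch like the following; the class and method names are hypothetical:

    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper mirroring the decoding done in assertIsMobReference:
    // a MOB reference cell value is a 4-byte int (the real value length)
    // followed by the name of the MOB file that holds the value.
    public class MobReferenceDecoder {
      static int valueLength(byte[] referenceValue) {
        return Bytes.toInt(referenceValue, 0, Bytes.SIZEOF_INT);
      }

      static String mobFileName(byte[] referenceValue) {
        return Bytes.toString(referenceValue, Bytes.SIZEOF_INT,
            referenceValue.length - Bytes.SIZEOF_INT);
      }
    }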
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 5714351,5498d66..fa634d1
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@@ -360,70 -413,13 +413,77 @@@ public class TestRegionServerMetrics
"_table_"+tableNameString +
"_region_" + i.getEncodedName()+
"_metric";
- metricsHelper.assertCounter(prefix + "_scanNextNumOps", 30, agg);
+ metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg);
}
+ metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource);
+ }
+ try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
+ admin.disableTable(tableName);
+ admin.deleteTable(tableName);
}
}
+
+ @Test
+ public void testMobMetrics() throws IOException, InterruptedException {
+ String tableNameString = "testMobMetrics";
+ TableName tableName = TableName.valueOf(tableNameString);
+ byte[] cf = Bytes.toBytes("d");
+ byte[] qualifier = Bytes.toBytes("qual");
+ byte[] val = Bytes.toBytes("mobdata");
+ int numHfiles = conf.getInt("hbase.hstore.compactionThreshold", 3) - 1;
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(cf);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0);
+ htd.addFamily(hcd);
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ HTable t = TEST_UTIL.createTable(htd, new byte[0][0], conf);
- HRegion region = rs.getOnlineRegions(tableName).get(0);
++ Region region = rs.getOnlineRegions(tableName).get(0);
+ t.setAutoFlush(true, true);
+ for (int insertCount = 0; insertCount < numHfiles; insertCount++) {
+ Put p = new Put(Bytes.toBytes(insertCount));
+ p.add(cf, qualifier, val);
+ t.put(p);
+ admin.flush(tableName);
+ }
+ metricsRegionServer.getRegionServerWrapper().forceRecompute();
+ metricsHelper.assertCounter("mobFlushCount", numHfiles, serverSource);
+ Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(2));
+ ResultScanner scanner = t.getScanner(scan);
+ scanner.next(100);
++ numScanNext++; // this is an ugly construct
+ scanner.close();
+ metricsRegionServer.getRegionServerWrapper().forceRecompute();
+ metricsHelper.assertCounter("mobScanCellsCount", 2, serverSource);
+ region.getTableDesc().getFamily(cf).setMobThreshold(100);
- region.initialize();
- region.compactStores(true);
++ ((HRegion)region).initialize();
++ region.compact(true);
+ metricsRegionServer.getRegionServerWrapper().forceRecompute();
+ metricsHelper.assertCounter("mobCompactedFromMobCellsCount", numHfiles,
+ serverSource);
+ metricsHelper.assertCounter("mobCompactedIntoMobCellsCount", 0, serverSource);
+ scanner = t.getScanner(scan);
+ scanner.next(100);
++ numScanNext++; // this is an ugly construct
+ metricsRegionServer.getRegionServerWrapper().forceRecompute();
+ // metrics are reset by the region initialization
+ metricsHelper.assertCounter("mobScanCellsCount", 0, serverSource);
+ for (int insertCount = numHfiles;
+ insertCount < 2 * numHfiles - 1; insertCount++) {
+ Put p = new Put(Bytes.toBytes(insertCount));
+ p.add(cf, qualifier, val);
+ t.put(p);
+ admin.flush(tableName);
+ }
+ region.getTableDesc().getFamily(cf).setMobThreshold(0);
- region.initialize();
- region.compactStores(true);
++ ((HRegion)region).initialize();
++ region.compact(true);
+ metricsRegionServer.getRegionServerWrapper().forceRecompute();
+ // metrics are reset by the region initialization
+ metricsHelper.assertCounter("mobCompactedFromMobCellsCount", 0, serverSource);
+ metricsHelper.assertCounter("mobCompactedIntoMobCellsCount", 2 * numHfiles - 1,
+ serverSource);
+ t.close();
+ admin.close();
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
index da39f59,1125d11..349ec1c
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
@@@ -59,5 -63,25 +63,25 @@@ public class TestStoreFileInfo
assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name));
}
}
+
+ @Test
+ public void testEqualsWithLink() throws IOException {
+ Path origin = new Path("/origin");
+ Path tmp = new Path("/tmp");
++ Path mob = new Path("/mob");
+ Path archive = new Path("/archive");
+ HFileLink link1 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"),
- new Path(archive, "f1"));
++ new Path(mob, "f1"), new Path(archive, "f1"));
+ HFileLink link2 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"),
- new Path(archive, "f1"));
-
++ new Path(mob, "f1"), new Path(archive, "f1"));
+
+ StoreFileInfo info1 = new StoreFileInfo(TEST_UTIL.getConfiguration(),
+ TEST_UTIL.getTestFileSystem(), null, link1);
+ StoreFileInfo info2 = new StoreFileInfo(TEST_UTIL.getConfiguration(),
+ TEST_UTIL.getTestFileSystem(), null, link2);
+
+ assertEquals(info1, info2);
+ assertEquals(info1.hashCode(), info2.hashCode());
+ }
}
[13/50] [abbrv] hbase git commit: HBASE-13204 Procedure v2 - client
create/delete table sync
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 37f726c..c30d92a 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -28,6 +28,7 @@ option optimize_for = SPEED;
import "HBase.proto";
import "Client.proto";
import "ClusterStatus.proto";
+import "ErrorHandling.proto";
import "Quota.proto";
/* Column-level protobufs */
@@ -108,6 +109,7 @@ message CreateTableRequest {
}
message CreateTableResponse {
+ optional uint64 proc_id = 1;
}
message DeleteTableRequest {
@@ -115,6 +117,7 @@ message DeleteTableRequest {
}
message DeleteTableResponse {
+ optional uint64 proc_id = 1;
}
message TruncateTableRequest {
@@ -380,6 +383,24 @@ message IsProcedureDoneResponse {
optional ProcedureDescription snapshot = 2;
}
+message GetProcedureResultRequest {
+ required uint64 proc_id = 1;
+}
+
+message GetProcedureResultResponse {
+ enum State {
+ NOT_FOUND = 0;
+ RUNNING = 1;
+ FINISHED = 2;
+ }
+
+ required State state = 1;
+ optional uint64 start_time = 2;
+ optional uint64 last_update = 3;
+ optional bytes result = 4;
+ optional ForeignExceptionMessage exception = 5;
+}
+
message SetQuotaRequest {
optional string user_name = 1;
optional string user_group = 2;
@@ -634,4 +655,7 @@ service MasterService {
/** Returns the timestamp of the last major compaction */
rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest)
returns(MajorCompactionTimestampResponse);
+
+ rpc getProcedureResult(GetProcedureResultRequest)
+ returns(GetProcedureResultResponse);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8ec883a..e2e600c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1326,7 +1326,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
- public void createTable(HTableDescriptor hTableDescriptor,
+ public long createTable(HTableDescriptor hTableDescriptor,
byte [][] splitKeys) throws IOException {
if (isStopped()) {
throw new MasterNotRunningException();
@@ -1357,9 +1357,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.postCreateTable(hTableDescriptor, newRegions);
}
- // TODO: change the interface to return the procId,
- // and add it to the response protobuf.
- //return procId;
+ return procId;
}
/**
@@ -1571,7 +1569,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
- public void deleteTable(final TableName tableName) throws IOException {
+ public long deleteTable(final TableName tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preDeleteTable(tableName);
@@ -1588,9 +1586,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.postDeleteTable(tableName);
}
- // TODO: change the interface to return the procId,
- // and add it to the response protobuf.
- //return procId;
+ return procId;
}
@Override
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 07b2da2..abdbf5a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -43,6 +43,8 @@ import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
@@ -86,6 +88,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnaps
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
@@ -158,6 +162,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.zookeeper.KeeperException;
@@ -405,11 +410,11 @@ public class MasterRpcServices extends RSRpcServices
HTableDescriptor hTableDescriptor = HTableDescriptor.convert(req.getTableSchema());
byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
try {
- master.createTable(hTableDescriptor, splitKeys);
+ long procId = master.createTable(hTableDescriptor, splitKeys);
+ return CreateTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
- return CreateTableResponse.newBuilder().build();
}
@Override
@@ -461,11 +466,11 @@ public class MasterRpcServices extends RSRpcServices
public DeleteTableResponse deleteTable(RpcController controller,
DeleteTableRequest request) throws ServiceException {
try {
- master.deleteTable(ProtobufUtil.toTableName(request.getTableName()));
+ long procId = master.deleteTable(ProtobufUtil.toTableName(request.getTableName()));
+ return DeleteTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
- return DeleteTableResponse.newBuilder().build();
}
@Override
@@ -962,6 +967,44 @@ public class MasterRpcServices extends RSRpcServices
}
@Override
+ public GetProcedureResultResponse getProcedureResult(RpcController controller,
+ GetProcedureResultRequest request) throws ServiceException {
+ LOG.debug("Checking to see if procedure is done procId=" + request.getProcId());
+ try {
+ master.checkInitialized();
+ GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();
+
+ Pair<ProcedureResult, Procedure> v = master.getMasterProcedureExecutor()
+ .getResultOrProcedure(request.getProcId());
+ if (v.getFirst() != null) {
+ ProcedureResult result = v.getFirst();
+ builder.setState(GetProcedureResultResponse.State.FINISHED);
+ builder.setStartTime(result.getStartTime());
+ builder.setLastUpdate(result.getLastUpdate());
+ if (result.isFailed()) {
+ builder.setException(result.getException().convert());
+ }
+ if (result.hasResultData()) {
+ builder.setResult(ByteStringer.wrap(result.getResult()));
+ }
+ master.getMasterProcedureExecutor().removeResult(request.getProcId());
+ } else {
+ Procedure proc = v.getSecond();
+ if (proc == null) {
+ builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
+ } else {
+ builder.setState(GetProcedureResultResponse.State.RUNNING);
+ builder.setStartTime(proc.getStartTime());
+ builder.setLastUpdate(proc.getLastUpdate());
+ }
+ }
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c,
ListNamespaceDescriptorsRequest request) throws ServiceException {
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 7352fe8..d823b35 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -105,7 +105,7 @@ public interface MasterServices extends Server {
* @param splitKeys Starting row keys for the initial table regions. If null
* a single region is created.
*/
- void createTable(HTableDescriptor desc, byte[][] splitKeys)
+ long createTable(HTableDescriptor desc, byte[][] splitKeys)
throws IOException;
/**
@@ -113,7 +113,7 @@ public interface MasterServices extends Server {
* @param tableName The table name
* @throws IOException
*/
- void deleteTable(final TableName tableName) throws IOException;
+ long deleteTable(final TableName tableName) throws IOException;
/**
* Truncate a table
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index ad5e671..84e9bef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -125,6 +125,7 @@ public class DeleteTableProcedure
LOG.debug("delete '" + getTableName() + "' from filesystem");
DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE);
+ regions = null;
break;
case DELETE_TABLE_UPDATE_DESC_CACHE:
LOG.debug("delete '" + getTableName() + "' descriptor");
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 2c13f39..5b2e50d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -227,9 +227,10 @@ public class TestCatalogJanitor {
}
@Override
- public void createTable(HTableDescriptor desc, byte[][] splitKeys)
+ public long createTable(HTableDescriptor desc, byte[][] splitKeys)
throws IOException {
// no-op
+ return -1;
}
@Override
@@ -427,7 +428,9 @@ public class TestCatalogJanitor {
}
@Override
- public void deleteTable(TableName tableName) throws IOException { }
+ public long deleteTable(TableName tableName) throws IOException {
+ return -1;
+ }
@Override
public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException { }
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6e3f46/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 48d0bfc..4a741a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -166,7 +166,7 @@ public class TestHBaseFsck {
conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE);
conf.setInt("hbase.hconnection.threads.core", POOL_SIZE);
conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT);
- conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 2 * REGION_ONLINE_TIMEOUT);
+ conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT);
TEST_UTIL.startMiniCluster(3);
tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,
[07/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core
framework
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java
new file mode 100644
index 0000000..0aebd5a
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Once a Procedure completes, the ProcedureExecutor takes all the useful
+ * information of the procedure (e.g. exception/result) and creates a ProcedureResult.
+ * The user of the Procedure framework will get the procedure result with
+ * procedureExecutor.getResult(procId).
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ProcedureResult {
+ private final RemoteProcedureException exception;
+ private final long lastUpdate;
+ private final long startTime;
+ private final byte[] result;
+
+ private long clientAckTime = -1;
+
+ public ProcedureResult(final long startTime, final long lastUpdate,
+ final RemoteProcedureException exception) {
+ this.lastUpdate = lastUpdate;
+ this.startTime = startTime;
+ this.exception = exception;
+ this.result = null;
+ }
+
+ public ProcedureResult(final long startTime, final long lastUpdate, final byte[] result) {
+ this.lastUpdate = lastUpdate;
+ this.startTime = startTime;
+ this.exception = null;
+ this.result = result;
+ }
+
+ public boolean isFailed() {
+ return exception != null;
+ }
+
+ public RemoteProcedureException getException() {
+ return exception;
+ }
+
+ public boolean hasResultData() {
+ return result != null;
+ }
+
+ public byte[] getResult() {
+ return result;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public long getLastUpdate() {
+ return lastUpdate;
+ }
+
+ public long executionTime() {
+ return lastUpdate - startTime;
+ }
+
+ public boolean hasClientAckTime() {
+ return clientAckTime > 0;
+ }
+
+ public long getClientAckTime() {
+ return clientAckTime;
+ }
+
+ @InterfaceAudience.Private
+ protected void setClientAckTime(final long timestamp) {
+ this.clientAckTime = timestamp;
+ }
+}
\ No newline at end of file
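A minimal sketch of how a caller might consume a ProcedureResult, assuming the executor accessor named in the class javadoc (procedureExecutor.getResult(procId)); the null return for a still-running procedure is an assumption:

    ProcedureResult result = procedureExecutor.getResult(procId);
    if (result == null) {
      // assumed: no result yet (procedure still running or unknown procId)
    } else if (result.isFailed()) {
      // rethrow the remote failure as a local IOException
      throw result.getException().unwrapRemoteException();
    } else if (result.hasResultData()) {
      byte[] data = result.getResult(); // procedure-specific payload
    }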
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java
new file mode 100644
index 0000000..2d7ba39
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Keep track of the runnable procedures
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ProcedureRunnableSet {
+ /**
+ * Inserts the specified element at the front of this queue.
+ * @param proc the Procedure to add
+ */
+ void addFront(Procedure proc);
+
+ /**
+ * Inserts the specified element at the end of this queue.
+ * @param proc the Procedure to add
+ */
+ void addBack(Procedure proc);
+
+ /**
+ * The procedure can't run at the moment; add it back to the queue,
+ * giving priority to someone else.
+ * @param proc the Procedure to add back to the list
+ */
+ void yield(Procedure proc);
+
+ /**
+ * The procedure in execution completed.
+ * This can be implemented to perform cleanups.
+ * @param proc the Procedure that completed the execution.
+ */
+ void completionCleanup(Procedure proc);
+
+ /**
+ * Fetch one Procedure from the queue
+ * @return the Procedure ID to execute, or null if nothing present.
+ */
+ Long poll();
+
+ /**
+ * If the implementation is blocked in poll() waiting for items to be added,
+ * this method should wake poll() so that it returns.
+ */
+ void signalAll();
+
+ /**
+ * Returns the number of elements in this collection.
+ * @return the number of elements in this collection.
+ */
+ int size();
+
+ /**
+ * Removes all of the elements from this collection.
+ */
+ void clear();
+}
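A minimal sketch of the consumer loop this interface is designed for; resolve() and execute() are hypothetical, and the null return from poll() covers wakeups via signalAll():

    // Worker loop draining a ProcedureRunnableSet.
    void workerLoop(ProcedureRunnableSet queue) {
      while (running) {              // 'running' flag cleared on shutdown
        Long procId = queue.poll();  // may block until an id is available
        if (procId == null) {
          continue;                  // woken by signalAll(); re-check 'running'
        }
        Procedure proc = resolve(procId); // hypothetical id -> Procedure lookup
        execute(proc);                    // hypothetical single execution step
      }
    }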
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java
new file mode 100644
index 0000000..7b17fb2
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Simple runqueue for the procedures
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ProcedureSimpleRunQueue implements ProcedureRunnableSet {
+ private final Deque<Long> runnables = new ArrayDeque<Long>();
+ private final ReentrantLock lock = new ReentrantLock();
+ private final Condition waitCond = lock.newCondition();
+
+ @Override
+ public void addFront(final Procedure proc) {
+ lock.lock();
+ try {
+ runnables.addFirst(proc.getProcId());
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void addBack(final Procedure proc) {
+ lock.lock();
+ try {
+ runnables.addLast(proc.getProcId());
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void yield(final Procedure proc) {
+ addBack(proc);
+ }
+
+ @Override
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
+ public Long poll() {
+ lock.lock();
+ try {
+ if (runnables.isEmpty()) {
+ waitCond.await();
+ if (!runnables.isEmpty()) {
+ return runnables.pop();
+ }
+ } else {
+ return runnables.pop();
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return null;
+ } finally {
+ lock.unlock();
+ }
+ return null;
+ }
+
+ @Override
+ public void signalAll() {
+ lock.lock();
+ try {
+ waitCond.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void clear() {
+ lock.lock();
+ try {
+ runnables.clear();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public int size() {
+ lock.lock();
+ try {
+ return runnables.size();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void completionCleanup(Procedure proc) {
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
new file mode 100644
index 0000000..177ff5b
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+// TODO: Not used yet
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ProcedureYieldException extends ProcedureException {
+ /** default constructor */
+ public ProcedureYieldException() {
+ super();
+ }
+
+ /**
+ * Constructor
+ * @param s message
+ */
+ public ProcedureYieldException(String s) {
+ super(s);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java
new file mode 100644
index 0000000..6be512d
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * A RemoteProcedureException is an exception from another thread or process.
+ * <p>
+ * RemoteProcedureExceptions are sent to 'remote' peers to signal an abort in the face of failures.
+ * When serialized for transmission we encode using Protobufs to ensure version compatibility.
+ * <p>
+ * A RemoteProcedureException contains a Throwable as its cause.
+ * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation
+ * of the original exception created on the original 'remote' source. These ProxyThrowables have
+ * their stack traces and messages overridden to reflect the original 'remote' exception.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@SuppressWarnings("serial")
+public class RemoteProcedureException extends ProcedureException {
+
+ /**
+ * Name of the throwable's source such as a host or thread name. Must be non-null.
+ */
+ private final String source;
+
+ /**
+ * Create a new RemoteProcedureException that can be serialized.
+ * It is assumed that this came from a local source.
+ * @param source the name of the throwable's source, e.g. a host or thread name
+ * @param cause the local exception being wrapped
+ */
+ public RemoteProcedureException(String source, Throwable cause) {
+ super(cause);
+ assert source != null;
+ assert cause != null;
+ this.source = source;
+ }
+
+ public String getSource() {
+ return source;
+ }
+
+ public IOException unwrapRemoteException() {
+ if (getCause() instanceof RemoteException) {
+ return ((RemoteException)getCause()).unwrapRemoteException();
+ }
+ if (getCause() instanceof IOException) {
+ return (IOException)getCause();
+ }
+ return new IOException(getCause());
+ }
+
+ @Override
+ public String toString() {
+ String className = getCause().getClass().getName();
+ return className + " via " + getSource() + ":" + getLocalizedMessage();
+ }
+
+ /**
+ * Converts a RemoteProcedureException to an array of bytes.
+ * @param source the name of the external exception source
+ * @param t the "local" exception to serialize
+ * @return protobuf serialized version of RemoteProcedureException
+ */
+ public static byte[] serialize(String source, Throwable t) {
+ return toProto(source, t).toByteArray();
+ }
+
+ /**
+ * Takes a series of bytes and tries to generate a RemoteProcedureException instance for it.
+ * @param bytes the serialized form of the exception
+ * @return the RemoteProcedureException instance
+ * @throws InvalidProtocolBufferException if there was a deserialization problem.
+ */
+ public static RemoteProcedureException deserialize(byte[] bytes)
+ throws InvalidProtocolBufferException {
+ return fromProto(ForeignExceptionMessage.parseFrom(bytes));
+ }
+
+ public ForeignExceptionMessage convert() {
+ return ForeignExceptionUtil.toProtoForeignException(getSource(), getCause());
+ }
+
+ public static ForeignExceptionMessage toProto(String source, Throwable t) {
+ return ForeignExceptionUtil.toProtoForeignException(source, t);
+ }
+
+ public static RemoteProcedureException fromProto(final ForeignExceptionMessage eem) {
+ return new RemoteProcedureException(eem.getSource(), ForeignExceptionUtil.toIOException(eem));
+ }
+}
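A minimal round-trip sketch using only the static helpers defined above; the source name and cause are arbitrary:

    // Serialize a local failure, ship it, and rebuild it on the other side.
    byte[] wire = RemoteProcedureException.serialize("host-1",
        new IllegalStateException("boom"));
    RemoteProcedureException remote =
        RemoteProcedureException.deserialize(wire); // may throw InvalidProtocolBufferException
    assert "host-1".equals(remote.getSource());
    IOException ioe = remote.unwrapRemoteException(); // IOException view of the cause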
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
new file mode 100644
index 0000000..bc1af20
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
+
+/**
+ * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure".
+ * A "Root Procedure" is a Procedure without parent, each subprocedure will be
+ * added to the "Root Procedure" stack (or rollback-stack).
+ *
+ * RootProcedureState is used and managed only by the ProcedureExecutor.
+ * Long rootProcId = getRootProcedureId(proc);
+ * rollbackStack.get(rootProcId).acquire(proc)
+ * rollbackStack.get(rootProcId).release(proc)
+ * ...
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class RootProcedureState {
+ private static final Log LOG = LogFactory.getLog(RootProcedureState.class);
+
+ private enum State {
+ RUNNING, // The Procedure is running or ready to run
+ FAILED, // The Procedure failed, waiting for the rollback to execute
+ ROLLINGBACK, // The Procedure failed and the execution is being rolled back
+ }
+
+ private ArrayList<Procedure> subprocedures = null;
+ private State state = State.RUNNING;
+ private int running = 0;
+
+ public synchronized boolean isFailed() {
+ switch (state) {
+ case ROLLINGBACK:
+ case FAILED:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ public synchronized boolean isRollingback() {
+ return state == State.ROLLINGBACK;
+ }
+
+ /**
+ * Called by the ProcedureExecutor to mark rollback execution
+ */
+ protected synchronized boolean setRollback() {
+ if (running == 0 && state == State.FAILED) {
+ state = State.ROLLINGBACK;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Called by the ProcedureExecutor to mark rollback execution
+ */
+ protected synchronized void unsetRollback() {
+ assert state == State.ROLLINGBACK;
+ state = State.FAILED;
+ }
+
+ protected synchronized List<Procedure> getSubprocedures() {
+ return subprocedures;
+ }
+
+ protected synchronized RemoteProcedureException getException() {
+ if (subprocedures != null) {
+ for (Procedure proc: subprocedures) {
+ if (proc.hasException()) {
+ return proc.getException();
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Called by the ProcedureExecutor to mark the procedure step as running.
+ */
+ protected synchronized boolean acquire(final Procedure proc) {
+ if (state != State.RUNNING) return false;
+
+ running++;
+ return true;
+ }
+
+ /**
+ * Called by the ProcedureExecutor to mark the procedure step as finished.
+ */
+ protected synchronized void release(final Procedure proc) {
+ running--;
+ }
+
+ protected synchronized void abort() {
+ if (state == State.RUNNING) {
+ state = State.FAILED;
+ }
+ }
+
+ /**
+ * Called by the ProcedureExecutor after the procedure step is completed,
+ * to add the step to the rollback list (or procedure stack)
+ */
+ protected synchronized void addRollbackStep(final Procedure proc) {
+ if (proc.isFailed()) {
+ state = State.FAILED;
+ }
+ if (subprocedures == null) {
+ subprocedures = new ArrayList<Procedure>();
+ }
+ proc.addStackIndex(subprocedures.size());
+ subprocedures.add(proc);
+ }
+
+ /**
+ * Called on store load by the ProcedureExecutor to load part of the stack.
+ *
+ * Each procedure has its own stack-positions, which means we have to write
+ * to the store only the Procedure we executed, and nothing else.
+ * On load we recreate the full stack by aggregating each procedure's stack-positions.
+ */
+ protected synchronized void loadStack(final Procedure proc) {
+ int[] stackIndexes = proc.getStackIndexes();
+ if (stackIndexes != null) {
+ if (subprocedures == null) {
+ subprocedures = new ArrayList<Procedure>();
+ }
+ int diff = (1 + stackIndexes[stackIndexes.length - 1]) - subprocedures.size();
+ if (diff > 0) {
+ subprocedures.ensureCapacity(1 + stackIndexes[stackIndexes.length - 1]);
+ while (diff-- > 0) subprocedures.add(null);
+ }
+ for (int i = 0; i < stackIndexes.length; ++i) {
+ subprocedures.set(stackIndexes[i], proc);
+ }
+ }
+ if (proc.getState() == ProcedureState.ROLLEDBACK) {
+ state = State.ROLLINGBACK;
+ } else if (proc.isFailed()) {
+ state = State.FAILED;
+ }
+ }
+
+ /**
+ * Called on store load by the ProcedureExecutor to validate the procedure stack.
+ */
+ protected synchronized boolean isValid() {
+ if (subprocedures != null) {
+ for (Procedure proc: subprocedures) {
+ if (proc == null) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
new file mode 100644
index 0000000..b4b35f2
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData;
+
+/**
+ * A SequentialProcedure describes one step in a procedure chain.
+ * -> Step 1 -> Step 2 -> Step 3
+ *
+ * The main difference from a base Procedure is that the execute() of a
+ * SequentialProcedure will be called only once; there will be no second
+ * execute() call once the children are finished, which means once the children
+ * of a SequentialProcedure are completed the SequentialProcedure is completed too.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class SequentialProcedure<TEnvironment> extends Procedure<TEnvironment> {
+ private boolean executed = false;
+
+ @Override
+ protected Procedure[] doExecute(final TEnvironment env)
+ throws ProcedureYieldException {
+ updateTimestamp();
+ try {
+ Procedure[] children = !executed ? execute(env) : null;
+ executed = !executed;
+ return children;
+ } finally {
+ updateTimestamp();
+ }
+ }
+
+ @Override
+ protected void doRollback(final TEnvironment env) throws IOException {
+ updateTimestamp();
+ if (executed) {
+ try {
+ rollback(env);
+ executed = !executed;
+ } finally {
+ updateTimestamp();
+ }
+ }
+ }
+
+ @Override
+ protected void serializeStateData(final OutputStream stream) throws IOException {
+ SequentialProcedureData.Builder data = SequentialProcedureData.newBuilder();
+ data.setExecuted(executed);
+ data.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ protected void deserializeStateData(final InputStream stream) throws IOException {
+ SequentialProcedureData data = SequentialProcedureData.parseDelimitedFrom(stream);
+ executed = data.getExecuted();
+ }
+}
\ No newline at end of file
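A minimal subclass sketch, assuming Procedure declares the execute()/rollback() hooks that doExecute()/doRollback() above delegate to; the environment type and its file-system accessors are hypothetical, and the remaining Procedure overrides are omitted:

    public class RenameDirProcedure extends SequentialProcedure<MyEnv> {
      @Override
      protected Procedure[] execute(MyEnv env) throws ProcedureYieldException {
        env.fs().rename(env.src(), env.dst()); // hypothetical one-shot step
        return null;                           // no sub-procedures to spawn
      }

      @Override
      protected void rollback(MyEnv env) throws IOException {
        env.fs().rename(env.dst(), env.src()); // undo the step
      }
    }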
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
new file mode 100644
index 0000000..eab96e4
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData;
+
+/**
+ * Procedure described by a series of steps.
+ *
+ * The procedure implementor must have an enum of 'states', describing
+ * the various steps of the procedure.
+ * Once the procedure is running, the procedure-framework will call executeFromState()
+ * using the 'state' provided by the user. The first call to executeFromState()
+ * will be performed with 'state = null'. The implementor can jump between
+ * states using setNextState(MyStateEnum.ordinal()).
+ * The rollback will call rollbackState() for each state that was executed, in reverse order.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class StateMachineProcedure<TEnvironment, TState>
+ extends Procedure<TEnvironment> {
+ private int stateCount = 0;
+ private int[] states = null;
+
+ protected enum Flow {
+ HAS_MORE_STATE,
+ NO_MORE_STATE,
+ }
+
+ /**
+ * called to perform a single step of the specified 'state' of the procedure
+ * @param state state to execute
+ * @return Flow.NO_MORE_STATE if the procedure is completed,
+ * Flow.HAS_MORE_STATE if there is another step.
+ */
+ protected abstract Flow executeFromState(TEnvironment env, TState state)
+ throws ProcedureYieldException;
+
+ /**
+ * called to perform the rollback of the specified state
+ * @param state state to rollback
+ * @throws IOException temporary failure, the rollback will retry later
+ */
+ protected abstract void rollbackState(TEnvironment env, TState state)
+ throws IOException;
+
+ /**
+ * Convert an ordinal (or state id) to an Enum (or more descriptive) state object.
+ * @param stateId the ordinal() of the state enum (or state id)
+ * @return the state enum object
+ */
+ protected abstract TState getState(int stateId);
+
+ /**
+ * Convert the Enum (or more descriptive) state object to an ordinal (or state id).
+ * @param state the state enum object
+ * @return stateId the ordinal() of the state enum (or state id)
+ */
+ protected abstract int getStateId(TState state);
+
+ /**
+ * Return the initial state object that will be used for the first call to executeFromState().
+ * @return the initial state enum object
+ */
+ protected abstract TState getInitialState();
+
+ /**
+ * Set the next state for the procedure.
+ * @param state the state enum object
+ */
+ protected void setNextState(final TState state) {
+ setNextState(getStateId(state));
+ }
+
+ @Override
+ protected Procedure[] execute(final TEnvironment env)
+ throws ProcedureYieldException {
+ updateTimestamp();
+ try {
+ TState state = stateCount > 0 ? getState(states[stateCount-1]) : getInitialState();
+ if (stateCount == 0) {
+ setNextState(getStateId(state));
+ }
+ if (executeFromState(env, state) == Flow.NO_MORE_STATE) {
+ // completed
+ return null;
+ }
+ return (isWaiting() || isFailed()) ? null : new Procedure[] {this};
+ } finally {
+ updateTimestamp();
+ }
+ }
+
+ @Override
+ protected void rollback(final TEnvironment env) throws IOException {
+ try {
+ updateTimestamp();
+ rollbackState(env, stateCount > 0 ? getState(states[stateCount-1]) : getInitialState());
+ stateCount--;
+ } finally {
+ updateTimestamp();
+ }
+ }
+
+ /**
+ * Set the next state for the procedure.
+ * @param stateId the ordinal() of the state enum (or state id)
+ */
+ private void setNextState(final int stateId) {
+ if (states == null || states.length == stateCount) {
+ int newCapacity = stateCount + 8;
+ if (states != null) {
+ states = Arrays.copyOf(states, newCapacity);
+ } else {
+ states = new int[newCapacity];
+ }
+ }
+ states[stateCount++] = stateId;
+ }
+
+ @Override
+ protected void serializeStateData(final OutputStream stream) throws IOException {
+ StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder();
+ for (int i = 0; i < stateCount; ++i) {
+ data.addState(states[i]);
+ }
+ data.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ protected void deserializeStateData(final InputStream stream) throws IOException {
+ StateMachineProcedureData data = StateMachineProcedureData.parseDelimitedFrom(stream);
+ stateCount = data.getStateCount();
+ if (stateCount > 0) {
+ states = new int[stateCount];
+ for (int i = 0; i < stateCount; ++i) {
+ states[i] = data.getState(i);
+ }
+ } else {
+ states = null;
+ }
+ }
+}
\ No newline at end of file
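A minimal two-state subclass sketch wired to the hooks above; the enum, environment type and per-state work are hypothetical, and the remaining Procedure overrides are omitted:

    public class CreateThingProcedure
        extends StateMachineProcedure<MyEnv, CreateThingProcedure.State> {
      enum State { WRITE_FS, UPDATE_META }

      @Override
      protected Flow executeFromState(MyEnv env, State state)
          throws ProcedureYieldException {
        switch (state) {
          case WRITE_FS:
            env.writeLayout();                // hypothetical step 1
            setNextState(State.UPDATE_META);
            return Flow.HAS_MORE_STATE;
          case UPDATE_META:
            env.updateMeta();                 // hypothetical step 2
            return Flow.NO_MORE_STATE;        // machine complete
          default:
            throw new UnsupportedOperationException("unhandled state=" + state);
        }
      }

      @Override
      protected void rollbackState(MyEnv env, State state) throws IOException {
        // called per executed state, in reverse order
      }

      @Override protected State getState(int stateId) { return State.values()[stateId]; }
      @Override protected int getStateId(State state) { return state.ordinal(); }
      @Override protected State getInitialState() { return State.WRITE_FS; }
    }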
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java
new file mode 100644
index 0000000..cd6b0a7
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class TwoPhaseProcedure<TEnvironment> extends Procedure<TEnvironment> {
+ // TODO (e.g. used by ACLs/VisibilityTags updates)
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
new file mode 100644
index 0000000..0d1c050
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+
+/**
+ * The ProcedureStore is used by the executor to persist the state of each procedure execution.
+ * This allows the executor to resume the execution of pending/in-progress
+ * procedures after a machine failure or service shutdown.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface ProcedureStore {
+ /**
+ * Store listener interface.
+ * The main process should register a listener and respond to the store events.
+ */
+ public interface ProcedureStoreListener {
+ /**
+ * Triggered when the store is not able to write out data;
+ * the main process should abort.
+ */
+ void abortProcess();
+ }
+
+ /**
+ * Add the listener to the notification list.
+ * @param listener the ProcedureStoreListener to register
+ */
+ void registerListener(ProcedureStoreListener listener);
+
+ /**
+ * Remove the listener from the notification list.
+ * @param listener the ProcedureStoreListener to unregister
+ * @return true if the listener was in the list and was removed, otherwise false.
+ */
+ boolean unregisterListener(ProcedureStoreListener listener);
+
+ /**
+ * Start/Open the procedure store.
+ * @param numThreads number of threads/slots to be used by the store
+ */
+ void start(int numThreads) throws IOException;
+
+ /**
+ * Stop/Close the procedure store
+ * @param abort true if the stop is an abort
+ */
+ void stop(boolean abort);
+
+ /**
+ * @return true if the store is running, otherwise false.
+ */
+ boolean isRunning();
+
+ /**
+ * @return the number of threads/slots passed to start()
+ */
+ int getNumThreads();
+
+ /**
+ * Acquire the lease for the procedure store.
+ */
+ void recoverLease() throws IOException;
+
+ /**
+ * Load the Procedures in the store.
+ * @return an iterator over the procedures present in the store
+ */
+ Iterator<Procedure> load() throws IOException;
+
+ /**
+ * When a procedure is submitted to the executor insert(proc, null) will be called.
+ * 'proc' has a 'RUNNABLE' state and the initial information required to start up.
+ *
+ * When a procedure is executed and it returns children, insert(proc, subprocs) will be called.
+ * 'proc' has a 'WAITING' state and an updated state.
+ * 'subprocs' are the children in 'RUNNABLE' state with the initial information.
+ *
+ * @param proc the procedure to serialize and write to the store.
+ * @param subprocs the newly created children of the proc.
+ */
+ void insert(Procedure proc, Procedure[] subprocs);
+
+ /**
+ * The specified procedure was executed,
+ * and the new state should be written to the store.
+ * @param proc the procedure to serialize and write to the store.
+ */
+ void update(Procedure proc);
+
+ /**
+ * The specified procId was removed from the executor,
+ * due to completion, abort or failure.
+ * The store implementor should remove all the information about the specified procId.
+ * @param procId the ID of the procedure to remove.
+ */
+ void delete(long procId);
+}
\ No newline at end of file
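As a reading aid, a minimal in-memory implementation of the interface above
might look like the sketch below; it is a non-durable stand-in (nothing
survives a restart) and the class name is hypothetical:

import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;

public class InMemoryProcedureStore implements ProcedureStore {
  private final CopyOnWriteArrayList<ProcedureStoreListener> listeners =
      new CopyOnWriteArrayList<ProcedureStoreListener>();
  private final ConcurrentHashMap<Long, Procedure> procs =
      new ConcurrentHashMap<Long, Procedure>();
  private volatile boolean running = false;
  private int numThreads;

  @Override
  public void registerListener(ProcedureStoreListener listener) {
    listeners.add(listener);
  }

  @Override
  public boolean unregisterListener(ProcedureStoreListener listener) {
    return listeners.remove(listener);
  }

  @Override
  public void start(int numThreads) throws IOException {
    this.numThreads = numThreads;
    this.running = true;
  }

  @Override
  public void stop(boolean abort) { running = false; }

  @Override
  public boolean isRunning() { return running; }

  @Override
  public int getNumThreads() { return numThreads; }

  @Override
  public void recoverLease() throws IOException {
    // no shared storage, so there is no lease to recover
  }

  @Override
  public Iterator<Procedure> load() throws IOException {
    return procs.values().iterator();
  }

  @Override
  public void insert(Procedure proc, Procedure[] subprocs) {
    procs.put(proc.getProcId(), proc);
    if (subprocs != null) {
      for (Procedure p: subprocs) {
        procs.put(p.getProcId(), p);
      }
    }
  }

  @Override
  public void update(Procedure proc) { procs.put(proc.getProcId(), proc); }

  @Override
  public void delete(long procId) { procs.remove(procId); }
}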
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
new file mode 100644
index 0000000..4e4653a
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -0,0 +1,540 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
+
+/**
+ * Keeps track of live procedures.
+ *
+ * It can be used by the ProcedureStore to identify which procedures are already
+ * deleted/completed to avoid the deserialization step on restart.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ProcedureStoreTracker {
+ private final TreeMap<Long, BitSetNode> map = new TreeMap<Long, BitSetNode>();
+
+ private boolean keepDeletes = false;
+ private boolean partial = false;
+
+ public enum DeleteState { YES, NO, MAYBE }
+
+ public static class BitSetNode {
+ private final static long WORD_MASK = 0xffffffffffffffffL;
+ private final static int ADDRESS_BITS_PER_WORD = 6;
+ private final static int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD;
+ private final static int MAX_NODE_SIZE = 4 << ADDRESS_BITS_PER_WORD;
+
+ private long[] updated;
+ private long[] deleted;
+ private long start;
+
+ public void dump() {
+ System.out.printf("%06d:%06d min=%d max=%d%n", getStart(), getEnd(),
+ getMinProcId(), getMaxProcId());
+ System.out.println("Update:");
+ for (int i = 0; i < updated.length; ++i) {
+ for (int j = 0; j < BITS_PER_WORD; ++j) {
+ System.out.print((updated[i] & (1L << j)) != 0 ? "1" : "0");
+ }
+ System.out.println(" " + i);
+ }
+ System.out.println();
+ System.out.println("Delete:");
+ for (int i = 0; i < deleted.length; ++i) {
+ for (int j = 0; j < BITS_PER_WORD; ++j) {
+ System.out.print((deleted[i] & (1L << j)) != 0 ? "1" : "0");
+ }
+ System.out.println(" " + i);
+ }
+ System.out.println();
+ }
+
+ public BitSetNode(final long procId, final boolean partial) {
+ start = alignDown(procId);
+
+ int count = 2;
+ updated = new long[count];
+ deleted = new long[count];
+ for (int i = 0; i < count; ++i) {
+ updated[i] = 0;
+ deleted[i] = partial ? 0 : WORD_MASK;
+ }
+
+ updateState(procId, false);
+ }
+
+ protected BitSetNode(final long start, final long[] updated, final long[] deleted) {
+ this.start = start;
+ this.updated = updated;
+ this.deleted = deleted;
+ }
+
+ public void update(final long procId) {
+ updateState(procId, false);
+ }
+
+ public void delete(final long procId) {
+ updateState(procId, true);
+ }
+
+ public Long getStart() {
+ return start;
+ }
+
+ public Long getEnd() {
+ return start + (updated.length << ADDRESS_BITS_PER_WORD) - 1;
+ }
+
+ public boolean contains(final long procId) {
+ return start <= procId && procId <= getEnd();
+ }
+
+ public DeleteState isDeleted(final long procId) {
+ int bitmapIndex = getBitmapIndex(procId);
+ int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD;
+ if (wordIndex >= deleted.length) {
+ return DeleteState.MAYBE;
+ }
+ return (deleted[wordIndex] & (1L << bitmapIndex)) != 0 ? DeleteState.YES : DeleteState.NO;
+ }
+
+ private boolean isUpdated(final long procId) {
+ int bitmapIndex = getBitmapIndex(procId);
+ int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD;
+ if (wordIndex >= updated.length) {
+ return false;
+ }
+ return (updated[wordIndex] & (1L << bitmapIndex)) != 0;
+ }
+
+ public boolean isUpdated() {
+ // TODO: cache the value
+ for (int i = 0; i < updated.length; ++i) {
+ long deleteMask = ~deleted[i];
+ if ((updated[i] & deleteMask) != (WORD_MASK & deleteMask)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public boolean isEmpty() {
+ // TODO: cache the value
+ for (int i = 0; i < deleted.length; ++i) {
+ if (deleted[i] != WORD_MASK) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public void resetUpdates() {
+ for (int i = 0; i < updated.length; ++i) {
+ updated[i] = 0;
+ }
+ }
+
+ public void undeleteAll() {
+ for (int i = 0; i < deleted.length; ++i) {
+ deleted[i] = 0;
+ }
+ }
+
+ public ProcedureProtos.ProcedureStoreTracker.TrackerNode convert() {
+ ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder =
+ ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder();
+ builder.setStartId(start);
+ for (int i = 0; i < updated.length; ++i) {
+ builder.addUpdated(updated[i]);
+ builder.addDeleted(deleted[i]);
+ }
+ return builder.build();
+ }
+
+ public static BitSetNode convert(ProcedureProtos.ProcedureStoreTracker.TrackerNode data) {
+ long start = data.getStartId();
+ int size = data.getUpdatedCount();
+ long[] updated = new long[size];
+ long[] deleted = new long[size];
+ for (int i = 0; i < size; ++i) {
+ updated[i] = data.getUpdated(i);
+ deleted[i] = data.getDeleted(i);
+ }
+ return new BitSetNode(start, updated, deleted);
+ }
+
+ // ========================================================================
+ // Grow/Merge Helpers
+ // ========================================================================
+ public boolean canGrow(final long procId) {
+ return (procId - start) < MAX_NODE_SIZE;
+ }
+
+ public boolean canMerge(final BitSetNode rightNode) {
+ // the merged node must span at most MAX_NODE_SIZE ids
+ return (rightNode.getEnd() - start) < MAX_NODE_SIZE;
+ }
+
+ public void grow(final long procId) {
+ int delta, offset;
+
+ if (procId < start) {
+ // add to head
+ long newStart = alignDown(procId);
+ delta = (int)(start - newStart) >> ADDRESS_BITS_PER_WORD;
+ offset = delta;
+ start = newStart; // the node now begins at the new aligned boundary
+ } else {
+ // Add to tail
+ long newEnd = alignUp(procId + 1);
+ delta = (int)(newEnd - getEnd()) >> ADDRESS_BITS_PER_WORD;
+ offset = 0;
+ }
+
+ long[] newBitmap;
+ int oldSize = updated.length;
+
+ // Pre-fill the new words, then copy the old ones into place at 'offset',
+ // so both head-grow and tail-grow leave the fresh region initialized.
+ newBitmap = new long[oldSize + delta];
+ System.arraycopy(updated, 0, newBitmap, offset, oldSize);
+ updated = newBitmap;
+
+ newBitmap = new long[oldSize + delta];
+ java.util.Arrays.fill(newBitmap, WORD_MASK);
+ System.arraycopy(deleted, 0, newBitmap, offset, oldSize);
+ deleted = newBitmap;
+ }
+
+ public void merge(final BitSetNode rightNode) {
+ int delta = (int)(rightNode.getEnd() - getEnd()) >> ADDRESS_BITS_PER_WORD;
+
+ long[] newBitmap;
+ int oldSize = updated.length;
+ int newSize = (delta - rightNode.updated.length);
+ int offset = oldSize + newSize;
+
+ newBitmap = new long[oldSize + delta];
+ System.arraycopy(updated, 0, newBitmap, 0, oldSize);
+ System.arraycopy(rightNode.updated, 0, newBitmap, offset, rightNode.updated.length);
+ updated = newBitmap;
+
+ newBitmap = new long[oldSize + delta];
+ System.arraycopy(deleted, 0, newBitmap, 0, oldSize);
+ System.arraycopy(rightNode.deleted, 0, newBitmap, offset, rightNode.deleted.length);
+ deleted = newBitmap;
+
+ // initialize the gap words between the two nodes, not the copied right data
+ for (int i = 0; i < newSize; ++i) {
+ updated[oldSize + i] = 0;
+ deleted[oldSize + i] = WORD_MASK;
+ }
+ }
+
+ // ========================================================================
+ // Min/Max Helpers
+ // ========================================================================
+ public long getMinProcId() {
+ long minProcId = start;
+ for (int i = 0; i < deleted.length; ++i) {
+ if (deleted[i] == 0) {
+ return minProcId;
+ }
+
+ if (deleted[i] != WORD_MASK) {
+ for (int j = 0; j < BITS_PER_WORD; ++j) {
+ if ((deleted[i] & (1L << j)) != 0) {
+ return minProcId + j;
+ }
+ }
+ }
+
+ minProcId += BITS_PER_WORD;
+ }
+ return minProcId;
+ }
+
+ public long getMaxProcId() {
+ long maxProcId = getEnd();
+ for (int i = deleted.length - 1; i >= 0; --i) {
+ if (deleted[i] == 0) {
+ return maxProcId;
+ }
+
+ if (deleted[i] != WORD_MASK) {
+ for (int j = BITS_PER_WORD - 1; j >= 0; --j) {
+ if ((deleted[i] & (1L << j)) == 0) {
+ return maxProcId - (BITS_PER_WORD - 1 - j);
+ }
+ }
+ }
+ maxProcId -= BITS_PER_WORD;
+ }
+ return maxProcId;
+ }
+
+ // ========================================================================
+ // Bitmap Helpers
+ // ========================================================================
+ private int getBitmapIndex(final long procId) {
+ return (int)(procId - start);
+ }
+
+ private void updateState(final long procId, final boolean isDeleted) {
+ int bitmapIndex = getBitmapIndex(procId);
+ int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD;
+ long value = (1L << bitmapIndex);
+
+ if (isDeleted) {
+ updated[wordIndex] |= value;
+ deleted[wordIndex] |= value;
+ } else {
+ updated[wordIndex] |= value;
+ deleted[wordIndex] &= ~value;
+ }
+ }
+
+ // ========================================================================
+ // Helpers
+ // ========================================================================
+ private static long alignUp(final long x) {
+ return (x + (BITS_PER_WORD - 1)) & -BITS_PER_WORD;
+ }
+
+ private static long alignDown(final long x) {
+ return x & -BITS_PER_WORD;
+ }
+ }
+
+ public void insert(final Procedure proc, final Procedure[] subprocs) {
+ insert(proc.getProcId());
+ if (subprocs != null) {
+ for (int i = 0; i < subprocs.length; ++i) {
+ insert(subprocs[i].getProcId());
+ }
+ }
+ }
+
+ public void update(final Procedure proc) {
+ update(proc.getProcId());
+ }
+
+ public void insert(long procId) {
+ BitSetNode node = getOrCreateNode(procId);
+ node.update(procId);
+ }
+
+ public void update(long procId) {
+ Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+ assert entry != null : "expected node to update procId=" + procId;
+
+ BitSetNode node = entry.getValue();
+ assert node.contains(procId);
+ node.update(procId);
+ }
+
+ public void delete(long procId) {
+ Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+ assert entry != null : "expected node to delete procId=" + procId;
+
+ BitSetNode node = entry.getValue();
+ assert node.contains(procId) : "expected procId in the node";
+ node.delete(procId);
+
+ if (!keepDeletes && node.isEmpty()) {
+ // TODO: RESET if (map.size() == 1)
+ map.remove(entry.getKey());
+ }
+ }
+
+ @InterfaceAudience.Private
+ public void setDeleted(final long procId, final boolean isDeleted) {
+ BitSetNode node = getOrCreateNode(procId);
+ node.updateState(procId, isDeleted);
+ }
+
+ public void clear() {
+ this.map.clear();
+ }
+
+ public DeleteState isDeleted(long procId) {
+ Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+ if (entry != null) {
+ BitSetNode node = entry.getValue();
+ DeleteState state = node.isDeleted(procId);
+ return partial && !node.isUpdated(procId) ? DeleteState.MAYBE : state;
+ }
+ return partial ? DeleteState.MAYBE : DeleteState.YES;
+ }
+
+ public long getMinProcId() {
+ // TODO: Cache?
+ Map.Entry<Long, BitSetNode> entry = map.firstEntry();
+ return entry == null ? 0 : entry.getValue().getMinProcId();
+ }
+
+ public void setKeepDeletes(boolean keepDeletes) {
+ this.keepDeletes = keepDeletes;
+ if (!keepDeletes) {
+ Iterator<Map.Entry<Long, BitSetNode>> it = map.entrySet().iterator();
+ while (it.hasNext()) {
+ Map.Entry<Long, BitSetNode> entry = it.next();
+ if (entry.getValue().isEmpty()) {
+ it.remove();
+ }
+ }
+ }
+ }
+
+ public void setPartialFlag(boolean isPartial) {
+ this.partial = isPartial;
+ }
+
+ public boolean isEmpty() {
+ for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
+ if (!entry.getValue().isEmpty()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public boolean isUpdated() {
+ for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
+ if (!entry.getValue().isUpdated()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public void resetUpdates() {
+ for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
+ entry.getValue().resetUpdates();
+ }
+ }
+
+ public void undeleteAll() {
+ for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
+ entry.getValue().undeleteAll();
+ }
+ }
+
+ private BitSetNode getOrCreateNode(final long procId) {
+ // can procId fit in the left node?
+ BitSetNode leftNode = null;
+ boolean leftCanGrow = false;
+ Map.Entry<Long, BitSetNode> leftEntry = map.floorEntry(procId);
+ if (leftEntry != null) {
+ leftNode = leftEntry.getValue();
+ if (leftNode.contains(procId)) {
+ return leftNode;
+ }
+ leftCanGrow = leftNode.canGrow(procId);
+ }
+
+ BitSetNode rightNode = null;
+ boolean rightCanGrow = false;
+ Map.Entry<Long, BitSetNode> rightEntry = map.ceilingEntry(procId);
+ if (rightEntry != null) {
+ rightNode = rightEntry.getValue();
+ rightCanGrow = rightNode.canGrow(procId);
+ if (leftNode != null) {
+ if (leftNode.canMerge(rightNode)) {
+ // merge left and right node
+ return mergeNodes(leftNode, rightNode);
+ }
+
+ if (leftCanGrow && rightCanGrow) {
+ if ((procId - leftNode.getEnd()) <= (rightNode.getStart() - procId)) {
+ // grow the left node
+ return growNode(leftNode, procId);
+ }
+ // grow the right node
+ return growNode(rightNode, procId);
+ }
+ }
+ }
+
+ // grow the left node
+ if (leftCanGrow) {
+ return growNode(leftNode, procId);
+ }
+
+ // grow the right node
+ if (rightCanGrow) {
+ return growNode(rightNode, procId);
+ }
+
+ // add new node
+ BitSetNode node = new BitSetNode(procId, partial);
+ map.put(node.getStart(), node);
+ return node;
+ }
+
+ private BitSetNode growNode(BitSetNode node, long procId) {
+ map.remove(node.getStart());
+ node.grow(procId);
+ map.put(node.getStart(), node);
+ return node;
+ }
+
+ private BitSetNode mergeNodes(BitSetNode leftNode, BitSetNode rightNode) {
+ leftNode.merge(rightNode);
+ map.remove(rightNode.getStart());
+ return leftNode;
+ }
+
+ public void dump() {
+ System.out.println("map " + map.size());
+ for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
+ entry.getValue().dump();
+ }
+ }
+
+ public void writeTo(final OutputStream stream) throws IOException {
+ ProcedureProtos.ProcedureStoreTracker.Builder builder =
+ ProcedureProtos.ProcedureStoreTracker.newBuilder();
+ for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) {
+ builder.addNode(entry.getValue().convert());
+ }
+ builder.build().writeDelimitedTo(stream);
+ }
+
+ public void readFrom(final InputStream stream) throws IOException {
+ ProcedureProtos.ProcedureStoreTracker data =
+ ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream);
+ map.clear();
+ for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: data.getNodeList()) {
+ BitSetNode node = BitSetNode.convert(protoNode);
+ map.put(node.getStart(), node);
+ }
+ }
+}
\ No newline at end of file
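The word/bit arithmetic that BitSetNode leans on above (ADDRESS_BITS_PER_WORD
= 6, 64-bit words, boundaries aligned to multiples of 64) can be exercised in
isolation; this demo class is illustrative only, not part of the patch:

public class BitmapMathDemo {
  private static final int ADDRESS_BITS_PER_WORD = 6;   // 2^6 = 64 bits per word
  private static final int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD;

  private static long alignUp(final long x) {
    return (x + (BITS_PER_WORD - 1)) & -BITS_PER_WORD;  // next multiple of 64
  }

  private static long alignDown(final long x) {
    return x & -BITS_PER_WORD;                          // previous multiple of 64
  }

  public static void main(String[] args) {
    final long procId = 100;
    final long start = alignDown(procId);               // 64
    final int bitmapIndex = (int)(procId - start);      // 36
    final int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD;  // 0
    // Java long shifts use only the low 6 bits of the shift count, so
    // (1L << bitmapIndex) is implicitly (1L << (bitmapIndex % 64)).
    final long mask = 1L << bitmapIndex;
    System.out.println("start=" + start + " word=" + wordIndex
        + " mask=0x" + Long.toHexString(mask)
        + " alignUp(" + procId + ")=" + alignUp(procId));
  }
}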
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
new file mode 100644
index 0000000..29db3bf
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Thrown when a procedure WAL is corrupted
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class CorruptedWALProcedureStoreException extends HBaseIOException {
+ /** default constructor */
+ public CorruptedWALProcedureStoreException() {
+ super();
+ }
+
+ /**
+ * Constructor
+ * @param s message
+ */
+ public CorruptedWALProcedureStoreException(String s) {
+ super(s);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
new file mode 100644
index 0000000..859b3cb
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer;
+
+/**
+ * Describes a WAL File
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ProcedureWALFile implements Comparable<ProcedureWALFile> {
+ private static final Log LOG = LogFactory.getLog(ProcedureWALFile.class);
+
+ private ProcedureWALHeader header;
+ private FSDataInputStream stream;
+ private FileStatus logStatus;
+ private FileSystem fs;
+ private Path logFile;
+ private long startPos;
+
+ public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) {
+ this.fs = fs;
+ this.logStatus = logStatus;
+ this.logFile = logStatus.getPath();
+ }
+
+ public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos) {
+ this.fs = fs;
+ this.logFile = logFile;
+ this.header = header;
+ this.startPos = startPos;
+ }
+
+ public void open() throws IOException {
+ if (stream == null) {
+ stream = fs.open(logFile);
+ }
+
+ if (header == null) {
+ header = ProcedureWALFormat.readHeader(stream);
+ startPos = stream.getPos();
+ } else {
+ stream.seek(startPos);
+ }
+ }
+
+ public ProcedureWALTrailer readTrailer() throws IOException {
+ try {
+ return ProcedureWALFormat.readTrailer(stream, startPos, logStatus.getLen());
+ } finally {
+ stream.seek(startPos);
+ }
+ }
+
+ public void readTracker(ProcedureStoreTracker tracker) throws IOException {
+ ProcedureWALTrailer trailer = readTrailer();
+ try {
+ stream.seek(trailer.getTrackerPos());
+ tracker.readFrom(stream);
+ } finally {
+ stream.seek(startPos);
+ }
+ }
+
+ public void close() {
+ if (stream == null) return;
+ try {
+ stream.close();
+ } catch (IOException e) {
+ LOG.warn("unable to close the wal file: " + logFile, e);
+ } finally {
+ stream = null;
+ }
+ }
+
+ public FSDataInputStream getStream() {
+ return stream;
+ }
+
+ public ProcedureWALHeader getHeader() {
+ return header;
+ }
+
+ public boolean isCompacted() {
+ return header.getType() == ProcedureWALFormat.LOG_TYPE_COMPACTED;
+ }
+
+ public long getLogId() {
+ return header.getLogId();
+ }
+
+ public long getSize() {
+ return logStatus.getLen();
+ }
+
+ public void removeFile() throws IOException {
+ close();
+ fs.delete(logFile, false);
+ }
+
+ @Override
+ public int compareTo(final ProcedureWALFile other) {
+ long diff = header.getLogId() - other.header.getLogId();
+ return (diff < 0) ? -1 : (diff > 0) ? 1 : 0;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof ProcedureWALFile)) return false;
+ return compareTo((ProcedureWALFile)o) == 0;
+ }
+
+ @Override
+ public int hashCode() {
+ return logFile.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return logFile.toString();
+ }
+}
\ No newline at end of file
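A hypothetical caller-side sketch of the API above, recovering the tracker
snapshot persisted in a finished log; the helper class is illustrative and
assumes fs/status come from the store's scan of the log directory:

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
import org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFile;

public class ReadTrackerDemo {
  /** Loads the ProcedureStoreTracker written into the WAL trailer. */
  static ProcedureStoreTracker readTracker(FileSystem fs, FileStatus status)
      throws IOException {
    ProcedureWALFile wal = new ProcedureWALFile(fs, status);
    wal.open();                  // reads the header and remembers startPos
    try {
      ProcedureStoreTracker tracker = new ProcedureStoreTracker();
      wal.readTracker(tracker);  // seeks to the trailer, loads, seeks back
      return tracker;
    } finally {
      wal.close();
    }
  }
}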
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
new file mode 100644
index 0000000..17432ac
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hbase.io.util.StreamUtils;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
+import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Helper class that contains the WAL serialization utils.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class ProcedureWALFormat {
+ static final byte LOG_TYPE_STREAM = 0;
+ static final byte LOG_TYPE_COMPACTED = 1;
+ static final byte LOG_TYPE_MAX_VALID = 1;
+
+ static final byte HEADER_VERSION = 1;
+ static final byte TRAILER_VERSION = 1;
+ static final long HEADER_MAGIC = 0x31764c4157637250L;
+ static final long TRAILER_MAGIC = 0x50726357414c7631L;
+
+ @InterfaceAudience.Private
+ public static class InvalidWALDataException extends IOException {
+ public InvalidWALDataException(String s) {
+ super(s);
+ }
+
+ public InvalidWALDataException(Throwable t) {
+ super(t);
+ }
+ }
+
+ interface Loader {
+ void removeLog(ProcedureWALFile log);
+ void markCorruptedWAL(ProcedureWALFile log, IOException e);
+ }
+
+ private ProcedureWALFormat() {}
+
+ public static Iterator<Procedure> load(final Iterator<ProcedureWALFile> logs,
+ final ProcedureStoreTracker tracker, final Loader loader) throws IOException {
+ ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker);
+ tracker.setKeepDeletes(true);
+ try {
+ while (logs.hasNext()) {
+ ProcedureWALFile log = logs.next();
+ log.open();
+ try {
+ reader.read(log, loader);
+ } finally {
+ log.close();
+ }
+ }
+ // The tracker is now updated with all the procedures read from the logs
+ tracker.setPartialFlag(false);
+ tracker.resetUpdates();
+ } finally {
+ tracker.setKeepDeletes(false);
+ }
+ // TODO: Write compacted version?
+ return reader.getProcedures();
+ }
+
+ public static void writeHeader(OutputStream stream, ProcedureWALHeader header)
+ throws IOException {
+ header.writeDelimitedTo(stream);
+ }
+
+ /*
+ * +-----------------+
+ * | END OF WAL DATA | <---+
+ * +-----------------+ |
+ * | | |
+ * | Tracker | |
+ * | | |
+ * +-----------------+ |
+ * | version | |
+ * +-----------------+ |
+ * | TRAILER_MAGIC | |
+ * +-----------------+ |
+ * | offset |-----+
+ * +-----------------+
+ */
+ public static void writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
+ throws IOException {
+ long offset = stream.getPos();
+
+ // Write EOF Entry
+ ProcedureWALEntry.newBuilder()
+ .setType(ProcedureWALEntry.Type.EOF)
+ .build().writeDelimitedTo(stream);
+
+ // Write Tracker
+ tracker.writeTo(stream);
+
+ stream.write(TRAILER_VERSION);
+ StreamUtils.writeLong(stream, TRAILER_MAGIC);
+ StreamUtils.writeLong(stream, offset);
+ }
+
+ public static ProcedureWALHeader readHeader(InputStream stream)
+ throws IOException {
+ ProcedureWALHeader header;
+ try {
+ header = ProcedureWALHeader.parseDelimitedFrom(stream);
+ } catch (InvalidProtocolBufferException e) {
+ throw new InvalidWALDataException(e);
+ }
+
+ if (header == null) {
+ throw new InvalidWALDataException("No data available to read the Header");
+ }
+
+ if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) {
+ throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() +
+ " expected " + HEADER_VERSION);
+ }
+
+ if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) {
+ throw new InvalidWALDataException("Invalid header type. got " + header.getType());
+ }
+
+ return header;
+ }
+
+ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long startPos, long size)
+ throws IOException {
+ long trailerPos = size - 17; // trailer jump: version (1 byte) + magic (8) + offset (8)
+
+ if (trailerPos < startPos) {
+ throw new InvalidWALDataException("Missing trailer: size=" + size + " startPos=" + startPos);
+ }
+
+ stream.seek(trailerPos);
+ int version = stream.read();
+ if (version != TRAILER_VERSION) {
+ throw new InvalidWALDataException("Invalid Trailer version. got " + version +
+ " expected " + TRAILER_VERSION);
+ }
+
+ long magic = StreamUtils.readLong(stream);
+ if (magic != TRAILER_MAGIC) {
+ throw new InvalidWALDataException("Invalid Trailer magic. got " + magic +
+ " expected " + TRAILER_MAGIC);
+ }
+
+ long trailerOffset = StreamUtils.readLong(stream);
+ stream.seek(trailerOffset);
+
+ ProcedureWALEntry entry = readEntry(stream);
+ if (entry.getType() != ProcedureWALEntry.Type.EOF) {
+ throw new InvalidWALDataException("Invalid Trailer begin");
+ }
+
+ ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder()
+ .setVersion(version)
+ .setTrackerPos(stream.getPos())
+ .build();
+ return trailer;
+ }
+
+ public static ProcedureWALEntry readEntry(InputStream stream) throws IOException {
+ return ProcedureWALEntry.parseDelimitedFrom(stream);
+ }
+
+ public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type,
+ Procedure proc, Procedure[] subprocs) throws IOException {
+ ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder();
+ builder.setType(type);
+ builder.addProcedure(Procedure.convert(proc));
+ if (subprocs != null) {
+ for (int i = 0; i < subprocs.length; ++i) {
+ builder.addProcedure(Procedure.convert(subprocs[i]));
+ }
+ }
+ builder.build().writeDelimitedTo(slot);
+ }
+
+ public static void writeInsert(ByteSlot slot, Procedure proc)
+ throws IOException {
+ writeEntry(slot, ProcedureWALEntry.Type.INIT, proc, null);
+ }
+
+ public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] subprocs)
+ throws IOException {
+ writeEntry(slot, ProcedureWALEntry.Type.INSERT, proc, subprocs);
+ }
+
+ public static void writeUpdate(ByteSlot slot, Procedure proc)
+ throws IOException {
+ writeEntry(slot, ProcedureWALEntry.Type.UPDATE, proc, null);
+ }
+
+ public static void writeDelete(ByteSlot slot, long procId)
+ throws IOException {
+ ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder();
+ builder.setType(ProcedureWALEntry.Type.DELETE);
+ builder.setProcId(procId);
+ builder.build().writeDelimitedTo(slot);
+ }
+}
\ No newline at end of file
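The fixed 17-byte trailer jump that readTrailer() seeks to (one version byte,
an 8-byte magic, an 8-byte back-pointer) can be mimicked with plain java.io.
This is a standalone sketch, assuming fixed-width big-endian longs like the
ones StreamUtils appears to write; it is not the store's actual I/O path:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class TrailerJumpDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write(new byte[] { 1, 2, 3, 4 }, 0, 4);     // pretend WAL entries
    long offset = out.size();                       // where the trailer begins
    out.write(1);                                   // TRAILER_VERSION (1 byte)
    writeLong(out, 0x50726357414c7631L);            // TRAILER_MAGIC   (8 bytes)
    writeLong(out, offset);                         // back-pointer    (8 bytes)

    byte[] wal = out.toByteArray();
    long trailerPos = wal.length - 17;              // 1 + 8 + 8, as in readTrailer()
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(wal, (int) trailerPos, 17));
    System.out.println("version=" + in.read()
        + " magic=0x" + Long.toHexString(in.readLong())
        + " offset=" + in.readLong());
  }

  private static void writeLong(ByteArrayOutputStream out, long v) {
    for (int i = 56; i >= 0; i -= 8) {
      out.write((int) (v >>> i));                   // big-endian byte order
    }
  }
}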
http://git-wip-us.apache.org/repos/asf/hbase/blob/04246c6c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
new file mode 100644
index 0000000..a60b8f5
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry;
+
+/**
+ * Helper class that loads the procedures stored in a WAL
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ProcedureWALFormatReader {
+ private static final Log LOG = LogFactory.getLog(ProcedureWALFormatReader.class);
+
+ private final ProcedureStoreTracker tracker;
+ //private final long compactionLogId;
+
+ private final Map<Long, Procedure> procedures = new HashMap<Long, Procedure>();
+ private final Map<Long, ProcedureProtos.Procedure> localProcedures =
+ new HashMap<Long, ProcedureProtos.Procedure>();
+
+ private long maxProcId = 0;
+
+ public ProcedureWALFormatReader(final ProcedureStoreTracker tracker) {
+ this.tracker = tracker;
+ }
+
+ public void read(ProcedureWALFile log, ProcedureWALFormat.Loader loader) throws IOException {
+ FSDataInputStream stream = log.getStream();
+ try {
+ boolean hasMore = true;
+ while (hasMore) {
+ ProcedureWALEntry entry = ProcedureWALFormat.readEntry(stream);
+ if (entry == null) {
+ LOG.warn("nothing left to decode. exiting with missing EOF");
+ hasMore = false;
+ break;
+ }
+ switch (entry.getType()) {
+ case INIT:
+ readInitEntry(entry);
+ break;
+ case INSERT:
+ readInsertEntry(entry);
+ break;
+ case UPDATE:
+ case COMPACT:
+ readUpdateEntry(entry);
+ break;
+ case DELETE:
+ readDeleteEntry(entry);
+ break;
+ case EOF:
+ hasMore = false;
+ break;
+ default:
+ throw new CorruptedWALProcedureStoreException("Invalid entry: " + entry);
+ }
+ }
+ } catch (IOException e) {
+ LOG.error("got an exception while reading the procedure WAL: " + log, e);
+ loader.markCorruptedWAL(log, e);
+ }
+
+ if (localProcedures.isEmpty()) {
+ LOG.info("No active entry found in state log " + log + ". removing it");
+ loader.removeLog(log);
+ } else {
+ Iterator<Map.Entry<Long, ProcedureProtos.Procedure>> itd =
+ localProcedures.entrySet().iterator();
+ while (itd.hasNext()) {
+ Map.Entry<Long, ProcedureProtos.Procedure> entry = itd.next();
+ itd.remove();
+
+ // Deserialize the procedure
+ Procedure proc = Procedure.convert(entry.getValue());
+ procedures.put(entry.getKey(), proc);
+ }
+
+ // TODO: Some procedure may be already runnables (see readInitEntry())
+ // (we can also check the "update map" in the log trackers)
+ }
+ }
+
+ public Iterator<Procedure> getProcedures() {
+ return procedures.values().iterator();
+ }
+
+ private void loadEntries(final ProcedureWALEntry entry) {
+ for (ProcedureProtos.Procedure proc: entry.getProcedureList()) {
+ maxProcId = Math.max(maxProcId, proc.getProcId());
+ if (isRequired(proc.getProcId())) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("read " + entry.getType() + " entry " + proc.getProcId());
+ }
+ localProcedures.put(proc.getProcId(), proc);
+ tracker.setDeleted(proc.getProcId(), false);
+ }
+ }
+ }
+
+ private void readInitEntry(final ProcedureWALEntry entry)
+ throws IOException {
+ assert entry.getProcedureCount() == 1 : "Expected only one procedure";
+ // TODO: Make it runnable, before reading other files
+ loadEntries(entry);
+ }
+
+ private void readInsertEntry(final ProcedureWALEntry entry) throws IOException {
+ assert entry.getProcedureCount() >= 1 : "Expected one or more procedures";
+ loadEntries(entry);
+ }
+
+ private void readUpdateEntry(final ProcedureWALEntry entry) throws IOException {
+ assert entry.getProcedureCount() == 1 : "Expected only one procedure";
+ loadEntries(entry);
+ }
+
+ private void readDeleteEntry(final ProcedureWALEntry entry) throws IOException {
+ assert entry.getProcedureCount() == 0 : "Expected no procedures";
+ assert entry.hasProcId() : "expected ProcID";
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("read delete entry " + entry.getProcId());
+ }
+ maxProcId = Math.max(maxProcId, entry.getProcId());
+ localProcedures.remove(entry.getProcId());
+ tracker.setDeleted(entry.getProcId(), true);
+ }
+
+ private boolean isDeleted(final long procId) {
+ return tracker.isDeleted(procId) == ProcedureStoreTracker.DeleteState.YES;
+ }
+
+ private boolean isRequired(final long procId) {
+ return !isDeleted(procId) && !procedures.containsKey(procId);
+ }
+}
\ No newline at end of file
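The isRequired()/isDeleted() filtering above reduces to two rules: skip any
procId the tracker already marks deleted, and keep only the first copy of a
procedure seen, so the order in which the logs are handed to the reader
decides which copy wins. A tiny standalone sketch of that rule, with
hypothetical names:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ReplayFilterDemo {
  public static void main(String[] args) {
    Map<Long, String> procedures = new HashMap<Long, String>(); // already loaded
    Set<Long> deleted = new HashSet<Long>();                    // tracker says YES

    deleted.add(7L);
    load(procedures, deleted, 5L, "proc-5 from log 2");
    load(procedures, deleted, 7L, "proc-7 from log 2");  // skipped: deleted
    load(procedures, deleted, 5L, "proc-5 from log 1");  // skipped: already loaded
    System.out.println(procedures);                      // {5=proc-5 from log 2}
  }

  static void load(Map<Long, String> procs, Set<Long> deleted,
      long procId, String proc) {
    if (!deleted.contains(procId) && !procs.containsKey(procId)) {
      procs.put(procId, proc);
    }
  }
}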
[38/50] [abbrv] hbase git commit: HBASE-13475 Small spelling mistake
in region_mover#isSuccessfulScan causes NoMethodError (Victor Xu)
Posted by jm...@apache.org.
HBASE-13475 Small spelling mistake in region_mover#isSuccessfulScan causes NoMethodError (Victor Xu)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d6926629
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d6926629
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d6926629
Branch: refs/heads/hbase-11339
Commit: d6926629f919589c4a0696e6eca98e696093d90b
Parents: 4788c6d
Author: tedyu <yu...@gmail.com>
Authored: Wed Apr 15 05:05:37 2015 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Wed Apr 15 05:05:37 2015 -0700
----------------------------------------------------------------------
bin/region_mover.rb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d6926629/bin/region_mover.rb
----------------------------------------------------------------------
diff --git a/bin/region_mover.rb b/bin/region_mover.rb
index cd0f173..3259564 100644
--- a/bin/region_mover.rb
+++ b/bin/region_mover.rb
@@ -100,7 +100,7 @@ def isSuccessfulScan(admin, r)
scan = Scan.new(r.getStartKey(), r.getStartKey())
scan.setBatch(1)
scan.setCaching(1)
- scan.setFilter(FilterList.new(FirstKeyOnlyFilter.new(),InclusiveStopFilter().new(r.getStartKey())))
+ scan.setFilter(FilterList.new(FirstKeyOnlyFilter.new(),InclusiveStopFilter.new(r.getStartKey())))
begin
table = HTable.new(admin.getConfiguration(), r.getTableName())
scanner = table.getScanner(scan)
[23/50] [abbrv] hbase git commit: HBASE-13209 Procedure V2 - master
Add/Modify/Delete Column Family (Stephen Yuan Jiang)
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
new file mode 100644
index 0000000..dcf1940
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestDeleteColumnFamilyProcedure {
+ private static final Log LOG = LogFactory.getLog(TestDeleteColumnFamilyProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testDeleteColumnFamily() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteColumnFamily");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ final String cf1 = "cf1";
+ final String cf2 = "cf2";
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, cf2, "f3");
+
+ // Test 1: delete the column family that exists online
+ long procId1 =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf1.getBytes()));
+ // Wait for the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf1);
+
+ // Test 2: delete the column family that exists offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ long procId2 =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf2.getBytes()));
+ // Wait for the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteColumnFamilyTwice() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteColumnFamilyTwice");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final String cf2 = "cf2";
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", cf2);
+
+ // delete the column family that exists
+ long procId1 =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf2.getBytes()));
+ // Wait for the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ // First delete should succeed
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf2);
+
+ // delete the column family that does not exist
+ long procId2 =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf2.getBytes()));
+
+ // Wait for the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+
+ // Second delete should fail with InvalidFamilyOperationException
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete online failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+
+ // Try again, this time with table disabled.
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ long procId3 =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf2.getBytes()));
+ // Wait for the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId3);
+ // Expect fail with InvalidFamilyOperationException
+ result = procExec.getResult(procId3);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete offline failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteNonExistingColumnFamily() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteNonExistingColumnFamily");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final String cf3 = "cf3";
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // delete the column family that does not exist
+ long procId1 =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf3.getBytes()));
+ // Wait for the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+
+ ProcedureResult result = procExec.getResult(procId1);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
+ final String cf4 = "cf4";
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3", cf4);
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf4.getBytes()));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = DeleteColumnFamilyState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ DeleteColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf4);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+ final String cf5 = "cf5";
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3", cf5);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, cf5.getBytes()));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = DeleteColumnFamilyState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ DeleteColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(),
+ tableName, cf5);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+ final String cf5 = "cf5";
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "f1", "f2", "f3", cf5);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes()));
+
+ // Failing before DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT we should trigger the rollback
+ // NOTE: the 1 (number before DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT step) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ int numberOfSteps = 1;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ DeleteColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2", "f3", cf5);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
+ final String cf5 = "cf5";
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "f1", "f2", "f3", cf5);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes()));
+
+ // Failing after DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT we should not trigger the rollback.
+ // NOTE: the 4 (number of DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT + 1 step) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ int numberOfSteps = 4;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
+ procExec,
+ procId,
+ numberOfSteps,
+ DeleteColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, cf5);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ae8b8cc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java
new file mode 100644
index 0000000..d29ea25
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java
@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestModifyColumnFamilyProcedure {
+ private static final Log LOG = LogFactory.getLog(TestModifyColumnFamilyProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testModifyColumnFamily() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyColumnFamily");
+ final String cf1 = "cf1";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf1);
+ int oldBlockSize = columnDescriptor.getBlocksize();
+ int newBlockSize = 3 * oldBlockSize;
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, "f2");
+
+ // Test 1: modify the column family online
+ columnDescriptor.setBlocksize(newBlockSize);
+ long procId1 = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+ procExec.getEnvironment(), tableName, columnDescriptor));
+ // Wait for the procedure to complete
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+ MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+ .getMaster(), tableName, cf1, columnDescriptor);
+
+ // Test 2: modify the column family offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ columnDescriptor.setBlocksize(newBlockSize * 2);
+ long procId2 =
+ procExec.submitProcedure(new ModifyColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, columnDescriptor));
+ // Wait for the procedure to complete
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+ MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+ .getMaster(), tableName, cf1, columnDescriptor);
+ }
+
+ @Test(timeout=60000)
+ public void testModifyNonExistingColumnFamily() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyNonExistingColumnFamily");
+ final String cf2 = "cf2";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2);
+ int oldBlockSize = columnDescriptor.getBlocksize();
+ int newBlockSize = 2 * oldBlockSize;
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");
+
+ // Modify the column family that does not exist
+ columnDescriptor.setBlocksize(newBlockSize);
+ long procId1 = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+ procExec.getEnvironment(), tableName, columnDescriptor));
+ // Wait for the procedure to complete
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+
+ ProcedureResult result = procExec.getResult(procId1);
+ assertTrue(result.isFailed());
+ LOG.debug("Modify failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
+ final String cf3 = "cf3";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf3);
+ int oldBlockSize = columnDescriptor.getBlocksize();
+ int newBlockSize = 4 * oldBlockSize;
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf3);
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Modify procedure && kill the executor
+ columnDescriptor.setBlocksize(newBlockSize);
+ long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+ procExec.getEnvironment(), tableName, columnDescriptor));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyColumnFamilyState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+ .getMaster(), tableName, cf3, columnDescriptor);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+ final String cf4 = "cf4";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4);
+ int oldBlockSize = columnDescriptor.getBlocksize();
+ int newBlockSize = 4 * oldBlockSize;
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf4);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Modify procedure && kill the executor
+ columnDescriptor.setBlocksize(newBlockSize);
+ long procId =
+ procExec.submitProcedure(new ModifyColumnFamilyProcedure(procExec.getEnvironment(),
+ tableName, columnDescriptor));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyColumnFamilyState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ ModifyColumnFamilyState.values());
+
+ MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
+ .getMaster(), tableName, cf4, columnDescriptor);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+ final String cf3 = "cf3";
+ final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf3);
+ int oldBlockSize = columnDescriptor.getBlocksize();
+ int newBlockSize = 4 * oldBlockSize;
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf3);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Modify procedure && kill the executor
+ columnDescriptor.setBlocksize(newBlockSize);
+ long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
+ procExec.getEnvironment(), tableName, columnDescriptor));
+
+ // Fail in the middle of the procedure
+ int numberOfSteps = ModifyColumnFamilyState.values().length - 2;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyColumnFamilyState.values());
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
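All of the recovery tests in this file share one skeleton; the condensed form below is assembled only from calls that already appear in this patch, for readers skimming the diff. Per the in-test comments, the test helper restarts the executor and executes each step twice, so every state must be idempotent.

    // Condensed from the tests above; tableName and columnDescriptor as in
    // those tests. Not a new API.
    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // simulate a crash at each step
    long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure(
        procExec.getEnvironment(), tableName, columnDescriptor));
    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId,
        ModifyColumnFamilyState.values().length, ModifyColumnFamilyState.values());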
[22/50] [abbrv] hbase git commit: HBASE-13210 Procedure V2 - master
Modify table (Stephen Yuan Jiang)
Posted by jm...@apache.org.
HBASE-13210 Procedure V2 - master Modify table (Stephen Yuan Jiang)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f538336
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f538336
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f538336
Branch: refs/heads/hbase-11339
Commit: 7f5383368b459913129bc13060e53b5cbb5d4e77
Parents: 6a6e3f4
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Thu Apr 9 21:06:30 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Apr 10 18:53:43 2015 +0100
----------------------------------------------------------------------
.../generated/MasterProcedureProtos.java | 1587 ++++++++++++++++--
.../src/main/protobuf/MasterProcedure.proto | 17 +
.../org/apache/hadoop/hbase/master/HMaster.java | 32 +-
.../hadoop/hbase/master/MasterFileSystem.java | 10 +-
.../procedure/MasterDDLOperationHelper.java | 167 ++
.../master/procedure/ModifyTableProcedure.java | 512 ++++++
.../procedure/TestModifyTableProcedure.java | 403 +++++
7 files changed, 2539 insertions(+), 189 deletions(-)
----------------------------------------------------------------------
[27/50] [abbrv] hbase git commit: HBASE-13436 Include user name in
ADE for scans
Posted by jm...@apache.org.
HBASE-13436 Include user name in ADE for scans
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1890bffc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1890bffc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1890bffc
Branch: refs/heads/hbase-11339
Commit: 1890bffce3919bfd6fbe143eae99bd3c0bd3b829
Parents: f651206
Author: Srikanth Srungarapu <ss...@cloudera.com>
Authored: Fri Apr 10 13:44:19 2015 -0700
Committer: Srikanth Srungarapu <ss...@cloudera.com>
Committed: Fri Apr 10 13:44:19 2015 -0700
----------------------------------------------------------------------
.../apache/hadoop/hbase/security/access/AccessController.java | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/1890bffc/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 7b306c0..03b5e39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -1546,8 +1546,9 @@ public class AccessController extends BaseMasterAndRegionObserver
logResult(authResult);
if (authorizationEnabled && !authResult.isAllowed()) {
- throw new AccessDeniedException("Insufficient permissions (table=" + table +
- ", action=READ)");
+ throw new AccessDeniedException("Insufficient permissions for user '"
+ + (user != null ? user.getShortName() : "null")
+ + "' (table=" + table + ", action=READ)");
}
}
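For reference, the message produced by the new concatenation looks like the line below; the user and table names are hypothetical, but the format is read directly off the code above.

    AccessDeniedException: Insufficient permissions for user 'bob' (table=t1, action=READ)

Before this change the user name was absent from the message, which made denied-scan reports hard to attribute.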
[39/50] [abbrv] hbase git commit: HBASE-13460. Revise the
MetaLookupPool executor-related defaults (introduced in HBASE-13036).
Posted by jm...@apache.org.
HBASE-13460. Revise the MetaLookupPool executor-related defaults (introduced in HBASE-13036).
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d314f7d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d314f7d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d314f7d9
Branch: refs/heads/hbase-11339
Commit: d314f7d9e018ca8a67e0601a2de9b2bd0a08a5dc
Parents: d692662
Author: Devaraj Das <dd...@apache.org>
Authored: Wed Apr 15 07:56:25 2015 -0700
Committer: Devaraj Das <dd...@apache.org>
Committed: Wed Apr 15 07:56:25 2015 -0700
----------------------------------------------------------------------
.../hbase/client/ConnectionImplementation.java | 25 ++++++++++++--------
1 file changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/d314f7d9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index bc2d51a..a51a4ac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -79,6 +79,7 @@ import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
+import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
@@ -364,7 +365,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
synchronized (this) {
if (batchPool == null) {
this.batchPool = getThreadPool(conf.getInt("hbase.hconnection.threads.max", 256),
- conf.getInt("hbase.hconnection.threads.core", 256), "-shared-");
+ conf.getInt("hbase.hconnection.threads.core", 256), "-shared-", null);
this.cleanupPool = true;
}
}
@@ -372,7 +373,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
return this.batchPool;
}
- private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint) {
+ private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
+ BlockingQueue<Runnable> passedWorkQueue) {
// shared HTable thread executor not yet initialized
if (maxThreads == 0) {
maxThreads = Runtime.getRuntime().availableProcessors() * 8;
@@ -381,10 +383,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
coreThreads = Runtime.getRuntime().availableProcessors() * 8;
}
long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
- LinkedBlockingQueue<Runnable> workQueue =
+ BlockingQueue<Runnable> workQueue = passedWorkQueue;
+ if (workQueue == null) {
+ workQueue =
new LinkedBlockingQueue<Runnable>(maxThreads *
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
+ }
ThreadPoolExecutor tpe = new ThreadPoolExecutor(
coreThreads,
maxThreads,
@@ -400,14 +405,14 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
if (this.metaLookupPool == null) {
synchronized (this) {
if (this.metaLookupPool == null) {
- //The meta lookup can happen on replicas of the meta (if the appropriate configs
- //are enabled).In a replicated-meta setup, the number '3' is assumed as the max
- //number of replicas by default (unless it is configured to be of a higher value).
- //In a non-replicated-meta setup, only one thread would be active.
+ // Some of these threads will be used for meta-replica lookups.
+ // To start with, up to threads.max.core threads can hit meta (including replicas);
+ // after that, requests queue up in the passed work queue, and a new thread is
+ // started only once the queue is full.
this.metaLookupPool = getThreadPool(
- conf.getInt("hbase.hconnection.meta.lookup.threads.max", 3),
- conf.getInt("hbase.hconnection.meta.lookup.threads.max.core", 3),
- "-metaLookup-shared-");
+ conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128),
+ conf.getInt("hbase.hconnection.meta.lookup.threads.max.core", 10),
+ "-metaLookup-shared-", new LinkedBlockingQueue<Runnable>());
}
}
}
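The new metaLookupPool defaults lean on a standard java.util.concurrent behavior that the comment alludes to: a ThreadPoolExecutor only grows past its core size when an offer to its work queue fails, so with an unbounded LinkedBlockingQueue the pool is effectively capped at the core size and the max (128 here) is never reached. A minimal, self-contained JDK demonstration (plain Java, not HBase code):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class UnboundedQueueDemo {
      public static void main(String[] args) throws InterruptedException {
        // core=2, max=10, unbounded queue: the pool never grows past 2 threads,
        // because the queue never rejects an offer.
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(2, 10, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 100; i++) {
          tpe.execute(new Runnable() {
            @Override
            public void run() {
              try { Thread.sleep(5); } catch (InterruptedException ignored) { }
            }
          });
        }
        System.out.println("pool size = " + tpe.getPoolSize()); // prints 2
        tpe.shutdown();
        tpe.awaitTermination(30, TimeUnit.SECONDS);
      }
    }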
[29/50] [abbrv] hbase git commit: HBASE-13202 Procedure v2 - core
framework (addendum)
Posted by jm...@apache.org.
HBASE-13202 Procedure v2 - core framework (addendum)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e75c6201
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e75c6201
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e75c6201
Branch: refs/heads/hbase-11339
Commit: e75c6201c69e57416525135a397a971ad4d1b902
Parents: e994b49
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Sat Apr 11 08:53:09 2015 +0100
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Sat Apr 11 08:53:09 2015 +0100
----------------------------------------------------------------------
.../apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/e75c6201/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index 6e7306c..7b9fc69 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -67,8 +67,8 @@ public class ProcedureTestingUtility {
int execThreads = procExecutor.getNumThreads();
// stop
procExecutor.stop();
- procStore.stop(false);
procExecutor.join();
+ procStore.stop(false);
// nothing running...
if (beforeStartAction != null) {
beforeStartAction.run();
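The one-line reorder above is the whole addendum; spelled out, the corrected shutdown sequence is the one below. The rationale in the comments is an inference from the code, not taken from the commit message.

    procExecutor.stop();    // signal the worker threads to stop
    procExecutor.join();    // wait until no procedure can still write to the store
    procStore.stop(false);  // only then is it safe to stop the store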
[36/50] [abbrv] hbase git commit: HBASE-13455 Procedure V2 - master
truncate table
Posted by jm...@apache.org.
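The generated code below adds the TruncateTableState enum, which encodes the truncate state machine: pre-operation, remove from meta, clear the FS layout, recreate the FS layout, add back to meta, assign regions, post-operation. A small round-trip through the standard generated-enum API, shown here for orientation (run with assertions enabled; it uses only methods that appear in the diff below):

    TruncateTableState s = TruncateTableState.valueOf(3);          // wire value 3
    assert s == TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT;
    assert s.getNumber() == 3;
    assert TruncateTableState.valueOf(99) == null;                 // unknown wire values map to null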
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index d83ee19..e0a4775 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -254,6 +254,133 @@ public final class MasterProcedureProtos {
}
/**
+ * Protobuf enum {@code TruncateTableState}
+ */
+ public enum TruncateTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>TRUNCATE_TABLE_PRE_OPERATION = 1;</code>
+ */
+ TRUNCATE_TABLE_PRE_OPERATION(0, 1),
+ /**
+ * <code>TRUNCATE_TABLE_REMOVE_FROM_META = 2;</code>
+ */
+ TRUNCATE_TABLE_REMOVE_FROM_META(1, 2),
+ /**
+ * <code>TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3;</code>
+ */
+ TRUNCATE_TABLE_CLEAR_FS_LAYOUT(2, 3),
+ /**
+ * <code>TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4;</code>
+ */
+ TRUNCATE_TABLE_CREATE_FS_LAYOUT(3, 4),
+ /**
+ * <code>TRUNCATE_TABLE_ADD_TO_META = 5;</code>
+ */
+ TRUNCATE_TABLE_ADD_TO_META(4, 5),
+ /**
+ * <code>TRUNCATE_TABLE_ASSIGN_REGIONS = 6;</code>
+ */
+ TRUNCATE_TABLE_ASSIGN_REGIONS(5, 6),
+ /**
+ * <code>TRUNCATE_TABLE_POST_OPERATION = 7;</code>
+ */
+ TRUNCATE_TABLE_POST_OPERATION(6, 7),
+ ;
+
+ /**
+ * <code>TRUNCATE_TABLE_PRE_OPERATION = 1;</code>
+ */
+ public static final int TRUNCATE_TABLE_PRE_OPERATION_VALUE = 1;
+ /**
+ * <code>TRUNCATE_TABLE_REMOVE_FROM_META = 2;</code>
+ */
+ public static final int TRUNCATE_TABLE_REMOVE_FROM_META_VALUE = 2;
+ /**
+ * <code>TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3;</code>
+ */
+ public static final int TRUNCATE_TABLE_CLEAR_FS_LAYOUT_VALUE = 3;
+ /**
+ * <code>TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4;</code>
+ */
+ public static final int TRUNCATE_TABLE_CREATE_FS_LAYOUT_VALUE = 4;
+ /**
+ * <code>TRUNCATE_TABLE_ADD_TO_META = 5;</code>
+ */
+ public static final int TRUNCATE_TABLE_ADD_TO_META_VALUE = 5;
+ /**
+ * <code>TRUNCATE_TABLE_ASSIGN_REGIONS = 6;</code>
+ */
+ public static final int TRUNCATE_TABLE_ASSIGN_REGIONS_VALUE = 6;
+ /**
+ * <code>TRUNCATE_TABLE_POST_OPERATION = 7;</code>
+ */
+ public static final int TRUNCATE_TABLE_POST_OPERATION_VALUE = 7;
+
+
+ public final int getNumber() { return value; }
+
+ public static TruncateTableState valueOf(int value) {
+ switch (value) {
+ case 1: return TRUNCATE_TABLE_PRE_OPERATION;
+ case 2: return TRUNCATE_TABLE_REMOVE_FROM_META;
+ case 3: return TRUNCATE_TABLE_CLEAR_FS_LAYOUT;
+ case 4: return TRUNCATE_TABLE_CREATE_FS_LAYOUT;
+ case 5: return TRUNCATE_TABLE_ADD_TO_META;
+ case 6: return TRUNCATE_TABLE_ASSIGN_REGIONS;
+ case 7: return TRUNCATE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<TruncateTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<TruncateTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<TruncateTableState>() {
+ public TruncateTableState findValueByNumber(int number) {
+ return TruncateTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
+ }
+
+ private static final TruncateTableState[] VALUES = values();
+
+ public static TruncateTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private TruncateTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:TruncateTableState)
+ }
+
+ /**
* Protobuf enum {@code DeleteTableState}
*/
public enum DeleteTableState
@@ -346,7 +473,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3);
}
private static final DeleteTableState[] VALUES = values();
@@ -455,7 +582,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
}
private static final AddColumnFamilyState[] VALUES = values();
@@ -564,7 +691,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
}
private static final ModifyColumnFamilyState[] VALUES = values();
@@ -682,7 +809,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
}
private static final DeleteColumnFamilyState[] VALUES = values();
@@ -800,7 +927,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
}
private static final EnableTableState[] VALUES = values();
@@ -918,7 +1045,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(8);
}
private static final DisableTableState[] VALUES = values();
@@ -2989,205 +3116,1670 @@ public final class MasterProcedureProtos {
unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
onChanged();
} else {
- unmodifiedTableSchemaBuilder_.clear();
+ unmodifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getUnmodifiedTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ if (unmodifiedTableSchemaBuilder_ != null) {
+ return unmodifiedTableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return unmodifiedTableSchema_;
+ }
+ }
+ /**
+ * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getUnmodifiedTableSchemaFieldBuilder() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ unmodifiedTableSchema_,
+ getParentForChildren(),
+ isClean());
+ unmodifiedTableSchema_ = null;
+ }
+ return unmodifiedTableSchemaBuilder_;
+ }
+
+ // required .TableSchema modified_table_schema = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> modifiedTableSchemaBuilder_;
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public boolean hasModifiedTableSchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ return modifiedTableSchema_;
+ } else {
+ return modifiedTableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder setModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ modifiedTableSchema_ = value;
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder setModifiedTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder mergeModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ modifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ modifiedTableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(modifiedTableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ modifiedTableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public Builder clearModifiedTableSchema() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getModifiedTableSchemaBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getModifiedTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() {
+ if (modifiedTableSchemaBuilder_ != null) {
+ return modifiedTableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return modifiedTableSchema_;
+ }
+ }
+ /**
+ * <code>required .TableSchema modified_table_schema = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getModifiedTableSchemaFieldBuilder() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ modifiedTableSchema_,
+ getParentForChildren(),
+ isClean());
+ modifiedTableSchema_ = null;
+ }
+ return modifiedTableSchemaBuilder_;
+ }
+
+ // required bool delete_column_family_in_modify = 4;
+ private boolean deleteColumnFamilyInModify_ ;
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public boolean hasDeleteColumnFamilyInModify() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public boolean getDeleteColumnFamilyInModify() {
+ return deleteColumnFamilyInModify_;
+ }
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public Builder setDeleteColumnFamilyInModify(boolean value) {
+ bitField0_ |= 0x00000008;
+ deleteColumnFamilyInModify_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool delete_column_family_in_modify = 4;</code>
+ */
+ public Builder clearDeleteColumnFamilyInModify() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ deleteColumnFamilyInModify_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ModifyTableStateData)
+ }
+
+ static {
+ defaultInstance = new ModifyTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ModifyTableStateData)
+ }
+
+ public interface TruncateTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ boolean hasUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required bool preserve_splits = 2;
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ boolean hasPreserveSplits();
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ boolean getPreserveSplits();
+
+ // optional .TableName table_name = 3;
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // optional .TableSchema table_schema = 4;
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ boolean hasTableSchema();
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema();
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder();
+
+ // repeated .RegionInfo region_info = 5;
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ int getRegionInfoCount();
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code TruncateTableStateData}
+ */
+ public static final class TruncateTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements TruncateTableStateDataOrBuilder {
+ // Use TruncateTableStateData.newBuilder() to construct.
+ private TruncateTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TruncateTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TruncateTableStateData defaultInstance;
+ public static TruncateTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TruncateTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TruncateTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ preserveSplits_ = input.readBool();
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 34: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = tableSchema_.toBuilder();
+ }
+ tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableSchema_);
+ tableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ case 42: {
+ if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000010;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TruncateTableStateData> PARSER =
+ new com.google.protobuf.AbstractParser<TruncateTableStateData>() {
+ public TruncateTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TruncateTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TruncateTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required bool preserve_splits = 2;
+ public static final int PRESERVE_SPLITS_FIELD_NUMBER = 2;
+ private boolean preserveSplits_;
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ public boolean hasPreserveSplits() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ public boolean getPreserveSplits() {
+ return preserveSplits_;
+ }
+
+ // optional .TableName table_name = 3;
+ public static final int TABLE_NAME_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // optional .TableSchema table_schema = 4;
+ public static final int TABLE_SCHEMA_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_;
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ return tableSchema_;
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ return tableSchema_;
+ }
+
+ // repeated .RegionInfo region_info = 5;
+ public static final int REGION_INFO_FIELD_NUMBER = 5;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ preserveSplits_ = false;
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasPreserveSplits()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasTableName()) {
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (hasTableSchema()) {
+ if (!getTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBool(2, preserveSplits_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, tableName_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(4, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(5, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(2, preserveSplits_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, tableName_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasPreserveSplits() == other.hasPreserveSplits());
+ if (hasPreserveSplits()) {
+ result = result && (getPreserveSplits()
+ == other.getPreserveSplits());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasTableSchema() == other.hasTableSchema());
+ if (hasTableSchema()) {
+ result = result && getTableSchema()
+ .equals(other.getTableSchema());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasPreserveSplits()) {
+ hash = (37 * hash) + PRESERVE_SPLITS_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getPreserveSplits());
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasTableSchema()) {
+ hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getTableSchema().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code TruncateTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ getTableSchemaFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ preserveSplits_ = false;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.preserveSplits_ = preserveSplits_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ if (tableSchemaBuilder_ == null) {
+ result.tableSchema_ = tableSchema_;
+ } else {
+ result.tableSchema_ = tableSchemaBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000010);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasPreserveSplits()) {
+ setPreserveSplits(other.getPreserveSplits());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasTableSchema()) {
+ mergeTableSchema(other.getTableSchema());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasPreserveSplits()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (hasTableName()) {
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ }
+ if (hasTableSchema()) {
+ if (!getTableSchema().isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * <code>required .UserInformation user_info = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required bool preserve_splits = 2;
+ private boolean preserveSplits_ ;
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ public boolean hasPreserveSplits() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ public boolean getPreserveSplits() {
+ return preserveSplits_;
+ }
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ public Builder setPreserveSplits(boolean value) {
+ bitField0_ |= 0x00000002;
+ preserveSplits_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool preserve_splits = 2;</code>
+ */
+ public Builder clearPreserveSplits() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ preserveSplits_ = false;
+ onChanged();
+ return this;
+ }
+
+ // optional .TableName table_name = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * <code>optional .TableName table_name = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // optional .TableSchema table_schema = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ return tableSchema_;
+ } else {
+ return tableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableSchema_ = value;
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public Builder setTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ tableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ tableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ tableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public Builder clearTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ bitField0_ |= 0x00000008;
+ onChanged();
+ return getTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ if (tableSchemaBuilder_ != null) {
+ return tableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return tableSchema_;
+ }
+ }
+ /**
+ * <code>optional .TableSchema table_schema = 4;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableSchemaFieldBuilder() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ tableSchema_,
+ getParentForChildren(),
+ isClean());
+ tableSchema_ = null;
+ }
+ return tableSchemaBuilder_;
+ }
+
+ // repeated .RegionInfo region_info = 5;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000010;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
}
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
}
/**
- * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() {
- bitField0_ |= 0x00000002;
- onChanged();
- return getUnmodifiedTableSchemaFieldBuilder().getBuilder();
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
}
/**
- * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
- if (unmodifiedTableSchemaBuilder_ != null) {
- return unmodifiedTableSchemaBuilder_.getMessageOrBuilder();
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
} else {
- return unmodifiedTableSchema_;
+ return regionInfoBuilder_.getMessage(index);
}
}
/**
- * <code>optional .TableSchema unmodified_table_schema = 2;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
- getUnmodifiedTableSchemaFieldBuilder() {
- if (unmodifiedTableSchemaBuilder_ == null) {
- unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
- unmodifiedTableSchema_,
- getParentForChildren(),
- isClean());
- unmodifiedTableSchema_ = null;
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
}
- return unmodifiedTableSchemaBuilder_;
+ return this;
}
-
- // required .TableSchema modified_table_schema = 3;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
- private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> modifiedTableSchemaBuilder_;
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public boolean hasModifiedTableSchema() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() {
- if (modifiedTableSchemaBuilder_ == null) {
- return modifiedTableSchema_;
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
} else {
- return modifiedTableSchemaBuilder_.getMessage();
+ regionInfoBuilder_.addMessage(value);
}
+ return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public Builder setModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
- if (modifiedTableSchemaBuilder_ == null) {
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
- modifiedTableSchema_ = value;
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
onChanged();
} else {
- modifiedTableSchemaBuilder_.setMessage(value);
+ regionInfoBuilder_.addMessage(index, value);
}
- bitField0_ |= 0x00000004;
return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public Builder setModifiedTableSchema(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
- if (modifiedTableSchemaBuilder_ == null) {
- modifiedTableSchema_ = builderForValue.build();
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
onChanged();
} else {
- modifiedTableSchemaBuilder_.setMessage(builderForValue.build());
+ regionInfoBuilder_.addMessage(builderForValue.build());
}
- bitField0_ |= 0x00000004;
return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public Builder mergeModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
- if (modifiedTableSchemaBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004) &&
- modifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
- modifiedTableSchema_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(modifiedTableSchema_).mergeFrom(value).buildPartial();
- } else {
- modifiedTableSchema_ = value;
- }
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
onChanged();
} else {
- modifiedTableSchemaBuilder_.mergeFrom(value);
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
}
- bitField0_ |= 0x00000004;
return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public Builder clearModifiedTableSchema() {
- if (modifiedTableSchemaBuilder_ == null) {
- modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
onChanged();
} else {
- modifiedTableSchemaBuilder_.clear();
+ regionInfoBuilder_.addAllMessages(values);
}
- bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getModifiedTableSchemaBuilder() {
- bitField0_ |= 0x00000004;
- onChanged();
- return getModifiedTableSchemaFieldBuilder().getBuilder();
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() {
- if (modifiedTableSchemaBuilder_ != null) {
- return modifiedTableSchemaBuilder_.getMessageOrBuilder();
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
} else {
- return modifiedTableSchema_;
+ regionInfoBuilder_.remove(index);
}
+ return this;
}
/**
- * <code>required .TableSchema modified_table_schema = 3;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
- getModifiedTableSchemaFieldBuilder() {
- if (modifiedTableSchemaBuilder_ == null) {
- modifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
- modifiedTableSchema_,
- getParentForChildren(),
- isClean());
- modifiedTableSchema_ = null;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .RegionInfo region_info = 5;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+        return regionInfo_.get(index);
+      } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
}
- return modifiedTableSchemaBuilder_;
}
-
- // required bool delete_column_family_in_modify = 4;
- private boolean deleteColumnFamilyInModify_ ;
/**
- * <code>required bool delete_column_family_in_modify = 4;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public boolean hasDeleteColumnFamilyInModify() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
}
/**
- * <code>required bool delete_column_family_in_modify = 4;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public boolean getDeleteColumnFamilyInModify() {
- return deleteColumnFamilyInModify_;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
}
/**
- * <code>required bool delete_column_family_in_modify = 4;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public Builder setDeleteColumnFamilyInModify(boolean value) {
- bitField0_ |= 0x00000008;
- deleteColumnFamilyInModify_ = value;
- onChanged();
- return this;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
}
/**
- * <code>required bool delete_column_family_in_modify = 4;</code>
+ * <code>repeated .RegionInfo region_info = 5;</code>
*/
- public Builder clearDeleteColumnFamilyInModify() {
- bitField0_ = (bitField0_ & ~0x00000008);
- deleteColumnFamilyInModify_ = false;
- onChanged();
- return this;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000010) == 0x00000010),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
}
- // @@protoc_insertion_point(builder_scope:ModifyTableStateData)
+ // @@protoc_insertion_point(builder_scope:TruncateTableStateData)
}
static {
- defaultInstance = new ModifyTableStateData(true);
+ defaultInstance = new TruncateTableStateData(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:ModifyTableStateData)
+ // @@protoc_insertion_point(class_scope:TruncateTableStateData)
}
public interface DeleteTableStateDataOrBuilder
@@ -9619,6 +11211,11 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ModifyTableStateData_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TruncateTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TruncateTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_DeleteTableStateData_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -9666,81 +11263,92 @@ public final class MasterProcedureProtos {
"n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
"leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
"\014.TableSchema\022&\n\036delete_column_family_in" +
- "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
- "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
- "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
- "o\030\003 \003(\0132\013.RegionInfo\"\300\001\n\030AddColumnFamily" +
+ "_modify\030\004 \002(\010\"\274\001\n\026TruncateTableStateData",
+ "\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\027\n" +
+ "\017preserve_splits\030\002 \002(\010\022\036\n\ntable_name\030\003 \001" +
+ "(\0132\n.TableName\022\"\n\014table_schema\030\004 \001(\0132\014.T" +
+ "ableSchema\022 \n\013region_info\030\005 \003(\0132\013.Region" +
+ "Info\"}\n\024DeleteTableStateData\022#\n\tuser_inf" +
+ "o\030\001 \002(\0132\020.UserInformation\022\036\n\ntable_name\030" +
+ "\002 \002(\0132\n.TableName\022 \n\013region_info\030\003 \003(\0132\013" +
+ ".RegionInfo\"\300\001\n\030AddColumnFamilyStateData" +
+ "\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n" +
+ "\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023columnf",
+ "amily_schema\030\003 \002(\0132\023.ColumnFamilySchema\022" +
+ "-\n\027unmodified_table_schema\030\004 \001(\0132\014.Table" +
+ "Schema\"\303\001\n\033ModifyColumnFamilyStateData\022#" +
+ "\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nt" +
+ "able_name\030\002 \002(\0132\n.TableName\0220\n\023columnfam" +
+ "ily_schema\030\003 \002(\0132\023.ColumnFamilySchema\022-\n" +
+ "\027unmodified_table_schema\030\004 \001(\0132\014.TableSc" +
+ "hema\"\254\001\n\033DeleteColumnFamilyStateData\022#\n\t" +
+ "user_info\030\001 \002(\0132\020.UserInformation\022\036\n\ntab" +
+ "le_name\030\002 \002(\0132\n.TableName\022\031\n\021columnfamil",
+ "y_name\030\003 \002(\014\022-\n\027unmodified_table_schema\030" +
+ "\004 \001(\0132\014.TableSchema\"{\n\024EnableTableStateD" +
+ "ata\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation" +
+ "\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\036\n\026skip" +
+ "_table_state_check\030\003 \002(\010\"|\n\025DisableTable" +
"StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" +
- "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220" +
- "\n\023columnfamily_schema\030\003 \002(\0132\023.ColumnFami" +
- "lySchema\022-\n\027unmodified_table_schema\030\004 \001(" +
- "\0132\014.TableSchema\"\303\001\n\033ModifyColumnFamilySt" +
- "ateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInforma" +
- "tion\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023",
- "columnfamily_schema\030\003 \002(\0132\023.ColumnFamily" +
- "Schema\022-\n\027unmodified_table_schema\030\004 \001(\0132" +
- "\014.TableSchema\"\254\001\n\033DeleteColumnFamilyStat" +
- "eData\022#\n\tuser_info\030\001 \002(\0132\020.UserInformati" +
- "on\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\031\n\021co" +
- "lumnfamily_name\030\003 \002(\014\022-\n\027unmodified_tabl" +
- "e_schema\030\004 \001(\0132\014.TableSchema\"{\n\024EnableTa" +
- "bleStateData\022#\n\tuser_info\030\001 \002(\0132\020.UserIn" +
- "formation\022\036\n\ntable_name\030\002 \002(\0132\n.TableNam" +
- "e\022\036\n\026skip_table_state_check\030\003 \002(\010\"|\n\025Dis",
- "ableTableStateData\022#\n\tuser_info\030\001 \002(\0132\020." +
- "UserInformation\022\036\n\ntable_name\030\002 \002(\0132\n.Ta" +
- "bleName\022\036\n\026skip_table_state_check\030\003 \002(\010*" +
- "\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE" +
- "_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LA" +
- "YOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033" +
- "CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_" +
- "TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABL" +
- "E_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022" +
- "\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABL",
- "E_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE" +
- "_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMO" +
- "VE_REPLICA_COLUMN\020\004\022!\n\035
<TRUNCATED>
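For readers skimming the generated API above, a minimal round-trip sketch may help (editorial, not part of the commit). It assumes RPCProtos.UserInformation exposes setEffectiveUser for its required effective_user field; the writeDelimitedTo/parseDelimitedFrom pair mirrors what serializeStateData and deserializeStateData do in TruncateTableProcedure later in this commit.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;

public class TruncateStateRoundTrip {
  public static void main(String[] args) throws Exception {
    // Both required fields must be set, or build() throws an
    // UninitializedMessageException (see isInitialized() above).
    TruncateTableStateData state = TruncateTableStateData.newBuilder()
        .setUserInfo(UserInformation.newBuilder()
            .setEffectiveUser("hbase")   // assumed field name on UserInformation
            .build())
        .setPreserveSplits(true)
        .build();

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    state.writeDelimitedTo(out);         // varint length prefix, then the bytes

    TruncateTableStateData parsed = TruncateTableStateData.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray()));
    assert parsed.getPreserveSplits();   // run with -ea; fields survive the trip
  }
}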
[35/50] [abbrv] hbase git commit: HBASE-13455 Procedure V2 - master truncate table
Posted by jm...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-protocol/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index a9ad0e0..e1c6880 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -75,6 +75,24 @@ message ModifyTableStateData {
required bool delete_column_family_in_modify = 4;
}
+enum TruncateTableState {
+ TRUNCATE_TABLE_PRE_OPERATION = 1;
+ TRUNCATE_TABLE_REMOVE_FROM_META = 2;
+ TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3;
+ TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4;
+ TRUNCATE_TABLE_ADD_TO_META = 5;
+ TRUNCATE_TABLE_ASSIGN_REGIONS = 6;
+ TRUNCATE_TABLE_POST_OPERATION = 7;
+}
+
+message TruncateTableStateData {
+ required UserInformation user_info = 1;
+ required bool preserve_splits = 2;
+ optional TableName table_name = 3;
+ optional TableSchema table_schema = 4;
+ repeated RegionInfo region_info = 5;
+}
+
enum DeleteTableState {
DELETE_TABLE_PRE_OPERATION = 1;
DELETE_TABLE_REMOVE_FROM_META = 2;
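A note on the modifiers above: table_name and table_schema are both optional because the procedure persists whichever it knows at the point of a checkpoint. The serializeStateData method later in this commit does exactly this:

  if (hTableDescriptor != null) {
    state.setTableSchema(hTableDescriptor.convert());   // the schema carries the name
  } else {
    state.setTableName(ProtobufUtil.toProtoTableName(tableName));
  }

user_info and preserve_splits, by contrast, are known from construction onward, so they can stay required.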
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fdbc31c..45bcdcb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -88,7 +88,6 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
-import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
@@ -101,6 +100,7 @@ import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -1599,9 +1599,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preTruncateTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
- TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits);
- handler.prepare();
- handler.process();
+
+ long procId = this.procedureExecutor.submitProcedure(
+ new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, preserveSplits));
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+
if (cpHost != null) {
cpHost.postTruncateTable(tableName);
}
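The hunk above replaces the one-shot TruncateTableHandler with a restartable procedure that the master can resume after a failover. From a client's point of view nothing changes; a hedged caller-side sketch (the table name is illustrative; Admin.truncateTable is the public API that ultimately reaches HMaster.truncateTable):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TruncateCaller {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("someTable");  // illustrative table name
      admin.disableTable(tn);          // truncate requires a disabled table
      admin.truncateTable(tn, true);   // preserveSplits=true; drives the new procedure
    }
  }
}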
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 84e9bef..2582a1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -106,14 +106,15 @@ public class DeleteTableProcedure
return Flow.NO_MORE_STATE;
}
- preDelete(env);
-
// TODO: Move out... in the acquireLock()
LOG.debug("waiting for '" + getTableName() + "' regions in transition");
regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
ProcedureSyncWait.waitRegionInTransition(env, regions);
+ // Call coprocessors
+ preDelete(env);
+
setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
break;
case DELETE_TABLE_REMOVE_FROM_META:
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
new file mode 100644
index 0000000..5ef0a19
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.exceptions.HBaseException;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class TruncateTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, TruncateTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(TruncateTableProcedure.class);
+
+ private boolean preserveSplits;
+ private List<HRegionInfo> regions;
+ private UserGroupInformation user;
+ private HTableDescriptor hTableDescriptor;
+ private TableName tableName;
+
+ public TruncateTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ }
+
+ public TruncateTableProcedure(final MasterProcedureEnv env, final TableName tableName,
+ boolean preserveSplits) throws IOException {
+ this.tableName = tableName;
+ this.preserveSplits = preserveSplits;
+ this.user = env.getRequestUser().getUGI();
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, TruncateTableState state) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case TRUNCATE_TABLE_PRE_OPERATION:
+ // Verify if we can truncate the table
+ if (!prepareTruncate(env)) {
+ assert isFailed() : "the truncate should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+
+ // TODO: Move out... in the acquireLock()
+ LOG.debug("waiting for '" + getTableName() + "' regions in transition");
+ regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+ assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
+ ProcedureSyncWait.waitRegionInTransition(env, regions);
+
+ // Call coprocessors
+ preTruncate(env);
+
+ setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
+ break;
+ case TRUNCATE_TABLE_REMOVE_FROM_META:
+ hTableDescriptor = env.getMasterServices().getTableDescriptors()
+ .getDescriptor(tableName).getHTableDescriptor();
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);
+ break;
+ case TRUNCATE_TABLE_CLEAR_FS_LAYOUT:
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+ if (!preserveSplits) {
+ // if we are not preserving splits, generate a new single region
+ regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
+ }
+ setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
+ break;
+ case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
+ regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
+ CreateTableProcedure.updateTableDescCache(env, getTableName());
+ setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
+ break;
+ case TRUNCATE_TABLE_ADD_TO_META:
+ regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions);
+ setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS);
+ break;
+ case TRUNCATE_TABLE_ASSIGN_REGIONS:
+ CreateTableProcedure.assignRegions(env, getTableName(), regions);
+ setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION);
+ hTableDescriptor = null;
+ regions = null;
+ break;
+ case TRUNCATE_TABLE_POST_OPERATION:
+ postTruncate(env);
+ LOG.debug("truncate '" + getTableName() + "' completed");
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (HBaseException|IOException e) {
+ LOG.warn("Retriable error trying to truncate table=" + getTableName() + " state=" + state, e);
+ } catch (InterruptedException e) {
+ // if the interrupt is real, the executor will be stopped.
+ LOG.warn("Interrupted trying to truncate table=" + getTableName() + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final TruncateTableState state) {
+ if (state == TruncateTableState.TRUNCATE_TABLE_PRE_OPERATION) {
+ // nothing to rollback, pre-truncate is just table-state checks.
+ // We can fail if the table does not exist or is not disabled.
+ return;
+ }
+
+ // The truncate doesn't have a rollback. The execution will succeed, at some point.
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+
+ @Override
+ protected TruncateTableState getState(final int stateId) {
+ return TruncateTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final TruncateTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected TruncateTableState getInitialState() {
+ return TruncateTableState.TRUNCATE_TABLE_PRE_OPERATION;
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ // TODO: We may be able to abort if the procedure is not started yet.
+ return false;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "truncate table");
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(" preserveSplits=");
+ sb.append(preserveSplits);
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.TruncateTableStateData.Builder state =
+ MasterProcedureProtos.TruncateTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setPreserveSplits(preserveSplits);
+ if (hTableDescriptor != null) {
+ state.setTableSchema(hTableDescriptor.convert());
+ } else {
+ state.setTableName(ProtobufUtil.toProtoTableName(tableName));
+ }
+ if (regions != null) {
+ for (HRegionInfo hri: regions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.TruncateTableStateData state =
+ MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ if (state.hasTableSchema()) {
+ hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
+ tableName = hTableDescriptor.getTableName();
+ } else {
+ tableName = ProtobufUtil.toTableName(state.getTableName());
+ }
+ preserveSplits = state.getPreserveSplits();
+ if (state.getRegionInfoCount() == 0) {
+ regions = null;
+ } else {
+ regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ regions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ private boolean prepareTruncate(final MasterProcedureEnv env) throws IOException {
+ try {
+ env.getMasterServices().checkTableModifiable(getTableName());
+ } catch (TableNotFoundException|TableNotDisabledException e) {
+ setFailure("master-truncate-table", e);
+ return false;
+ }
+ return true;
+ }
+
+ private boolean preTruncate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = getTableName();
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.preTruncateTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ return true;
+ }
+
+ private void postTruncate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final TableName tableName = getTableName();
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.postTruncateTableHandler(tableName);
+ return null;
+ }
+ });
+ }
+ }
+}
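The state machine above advances from TRUNCATE_TABLE_PRE_OPERATION through TRUNCATE_TABLE_POST_OPERATION, returning Flow.HAS_MORE_STATE on retriable errors so the framework re-drives the current state. A minimal sketch of submitting it directly, in the style of the existing procedure tests (the executor accessor and surrounding test context are assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;

// Fragment: procExec would come from the mini-cluster master in a real test.
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
long procId = procExec.submitProcedure(
    new TruncateTableProcedure(procExec.getEnvironment(), tableName, true));
// Block until executeFromState returns NO_MORE_STATE, then assert success.
ProcedureTestingUtility.waitProcedure(procExec, procId);
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);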
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ff79569..9cb0d57c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2291,6 +2291,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
/**
+ * Return the number of rows in the given table.
+ */
+ public int countRows(final TableName tableName) throws IOException {
+ Table table = getConnection().getTable(tableName);
+ try {
+ return countRows(table);
+ } finally {
+ table.close();
+ }
+ }
+
+ /**
* Return an md5 digest of the entire contents of a table.
*/
public String checksumRows(final Table table) throws Exception {
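The new countRows(TableName) overload follows the try/finally idiom of the surrounding file; since Table is Closeable, an equivalent try-with-resources form would be (a sketch, not part of the patch):

  public int countRows(final TableName tableName) throws IOException {
    try (Table table = getConnection().getTable(tableName)) {
      return countRows(table);   // delegates to the existing Table overload
    }
  }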
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 9bb436e..57a15e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -35,6 +35,10 @@ import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.HMaster;
@@ -44,6 +48,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.MD5Hash;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -85,6 +90,7 @@ public class MasterProcedureTestingUtility {
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
assertTrue(fs.exists(tableDir));
+ FSUtils.logFileSystemState(fs, tableDir, LOG);
List<Path> allRegionDirs = FSUtils.getRegionDirs(fs, tableDir);
for (int i = 0; i < regions.length; ++i) {
Path regionDir = new Path(tableDir, regions[i].getEncodedName());
@@ -343,6 +349,43 @@ public class MasterProcedureTestingUtility {
assertTrue(hcfd.equals(columnDescriptor));
}
+ public static void loadData(final Connection connection, final TableName tableName,
+ int rows, final byte[][] splitKeys, final String... sfamilies) throws IOException {
+ byte[][] families = new byte[sfamilies.length][];
+ for (int i = 0; i < families.length; ++i) {
+ families[i] = Bytes.toBytes(sfamilies[i]);
+ }
+
+ BufferedMutator mutator = connection.getBufferedMutator(tableName);
+
+ // Write at least one row per split key, so every split region gets data
+ assertTrue(rows >= splitKeys.length);
+ for (byte[] k: splitKeys) {
+ byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k);
+ byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value)));
+ mutator.mutate(createPut(families, key, value));
+ rows--;
+ }
+
+ // Add the remaining rows; more rows mean more store files
+ while (rows-- > 0) {
+ byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
+ byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
+ mutator.mutate(createPut(families, key, value));
+ }
+ mutator.flush();
+ }
+
+ private static Put createPut(final byte[][] families, final byte[] key, final byte[] value) {
+ byte[] q = Bytes.toBytes("q");
+ Put put = new Put(key);
+ put.setDurability(Durability.SKIP_WAL);
+ for (byte[] family: families) {
+ put.add(family, q, value);
+ }
+ return put;
+ }
+
public static class InjectAbortOnLoadListener
implements ProcedureExecutor.ProcedureExecutorListener {
private final ProcedureExecutor<MasterProcedureEnv> procExec;
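A usage sketch for the new loadData helper; the call shape matches the truncate tests further down, while the table and connection setup here is assumed:

  byte[][] splitKeys = { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") };
  // One MD5-suffixed row is written per split key (so every split region gets data);
  // the remaining 97 rows get MD5-only keys; a single flush happens at the end.
  MasterProcedureTestingUtility.loadData(
    UTIL.getConnection(), tableName, 100, splitKeys, "f1", "f2");
  assertEquals(100, UTIL.countRows(tableName));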
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index 0f6c910..2576302 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTa
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -215,6 +216,67 @@ public class TestMasterFailoverWithProcedures {
}
// ==========================================================================
+ // Test Truncate Table
+ // ==========================================================================
+ @Test(timeout=90000)
+ public void testTruncateWithFailover() throws Exception {
+ // TODO: Should we try every step? (Master failover takes a long time.)
+ // It is already covered by TestTruncateTableProcedure,
+ // but without the master restart only the executor/store is restarted.
+ // Without a master restart we may not find bugs in the procedure code,
+ // like a missing "wait" for resources to be available (e.g. RS).
+ testTruncateWithFailoverAtStep(true, TruncateTableState.TRUNCATE_TABLE_ADD_TO_META.ordinal());
+ }
+
+ private void testTruncateWithFailoverAtStep(final boolean preserveSplits, final int step)
+ throws Exception {
+ final TableName tableName = TableName.valueOf("testTruncateWithFailoverAtStep" + step);
+
+ // create the table
+ final String[] families = new String[] { "f1", "f2" };
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, families);
+ // load and verify that there are rows in the table
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 100, splitKeys, families);
+ assertEquals(100, UTIL.countRows(tableName));
+ // disable the table
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Truncate procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, TruncateTableState.values());
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ UTIL.waitUntilAllRegionsAssigned(tableName);
+
+ // validate the table regions and layout
+ if (preserveSplits) {
+ assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
+ } else {
+ regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
+ assertEquals(1, regions.length);
+ }
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
+
+ // verify that there are no rows in the table
+ assertEquals(0, UTIL.countRows(tableName));
+
+ // verify that the table is read/writable
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 50, splitKeys, families);
+ assertEquals(50, UTIL.countRows(tableName));
+ }
+
+ // ==========================================================================
// Test Disable Table
// ==========================================================================
@Test(timeout=60000)
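The kill-and-restart machinery used above works as follows: setKillAndToggleBeforeStoreUpdate(procExec, true) makes the executor die just before each state transition is persisted, and the test harness then restarts it once per step, so every TruncateTableState is both recovered after a "crash" and executed a second time. A rough sketch of that loop, with the body assumed from the utility's contract rather than copied from it:

  // Rough sketch of the assumed testRecoveryAndDoubleExecution() loop;
  // numSteps is the number of states in the procedure's state enum.
  for (int i = 0; i < numSteps; ++i) {
    ProcedureTestingUtility.restart(procExec);             // simulate crash + recovery
    ProcedureTestingUtility.waitProcedure(procExec, procId);
  }
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);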
http://git-wip-us.apache.org/repos/asf/hbase/blob/4788c6d1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
new file mode 100644
index 0000000..58acbae
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -0,0 +1,246 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestTruncateTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestTruncateTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ assertTrue("expected executor to be running", procExec.isRunning());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout=60000)
+ public void testTruncateNotExistentTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testTruncateNotExistentTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, true));
+
+ // Truncate should fail with TableNotFoundException
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Truncate failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotFoundException);
+ }
+
+ @Test(timeout=60000)
+ public void testTruncateNotDisabledTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testTruncateNotDisabledTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
+
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, false));
+
+ // Truncate should fail with TableNotDisabledException
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Truncate failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotDisabledException);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleTruncatePreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleTruncatePreserveSplits");
+ testSimpleTruncate(tableName, true);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleTruncateNoPreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleTruncateNoPreserveSplits");
+ testSimpleTruncate(tableName, false);
+ }
+
+ private void testSimpleTruncate(final TableName tableName, final boolean preserveSplits)
+ throws Exception {
+ final String[] families = new String[] { "f1", "f2" };
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, families);
+ // load and verify that there are rows in the table
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 100, splitKeys, families);
+ assertEquals(100, UTIL.countRows(tableName));
+ // disable the table
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // truncate the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ UTIL.waitUntilAllRegionsAssigned(tableName);
+
+ // validate the table regions and layout
+ if (preserveSplits) {
+ assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
+ } else {
+ regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
+ assertEquals(1, regions.length);
+ }
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
+
+ // verify that there are no rows in the table
+ assertEquals(0, UTIL.countRows(tableName));
+
+ // verify that the table is read/writable
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 50, splitKeys, families);
+ assertEquals(50, UTIL.countRows(tableName));
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionPreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionPreserveSplits");
+ testRecoveryAndDoubleExecution(tableName, true);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionNoPreserveSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionNoPreserveSplits");
+ testRecoveryAndDoubleExecution(tableName, false);
+ }
+
+ private void testRecoveryAndDoubleExecution(final TableName tableName,
+ final boolean preserveSplits) throws Exception {
+ final String[] families = new String[] { "f1", "f2" };
+
+ // create the table
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, families);
+ // load and verify that there are rows in the table
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 100, splitKeys, families);
+ assertEquals(100, UTIL.countRows(tableName));
+ // disable the table
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Truncate procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
+
+ // Restart the executor and execute the step twice
+ // NOTE: the 7 (number of TruncateTableState steps) is hardcoded,
+ // so this test must be revisited whenever a new step is added.
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec, procId, 7, TruncateTableState.values());
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+ UTIL.waitUntilAllRegionsAssigned(tableName);
+
+ // validate the table regions and layout
+ if (preserveSplits) {
+ assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
+ } else {
+ regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
+ assertEquals(1, regions.length);
+ }
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
+
+ // verify that there are no rows in the table
+ assertEquals(0, UTIL.countRows(tableName));
+
+ // verify that the table is read/writable
+ MasterProcedureTestingUtility.loadData(
+ UTIL.getConnection(), tableName, 50, splitKeys, families);
+ assertEquals(50, UTIL.countRows(tableName));
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
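The hardcoded 7 in testRecoveryAndDoubleExecution is deliberate, per the NOTE above, but a hypothetical one-line guard would make the test fail loudly instead of silently under-testing when a state is added:

  // Hypothetical guard (not in the patch): fails fast if TruncateTableState grows,
  // pointing the developer at the hardcoded step count above.
  assertEquals("update the hardcoded step count in testRecoveryAndDoubleExecution",
    7, TruncateTableState.values().length);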
[32/50] [abbrv] hbase git commit: HBASE-13447 Bypass logic in
TimeRange.compare.
Posted by jm...@apache.org.
HBASE-13447 Bypass logic in TimeRange.compare.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/71536bdc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/71536bdc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/71536bdc
Branch: refs/heads/hbase-11339
Commit: 71536bdcc7d2d4b7a36a24ab9fa304cea4a8dda2
Parents: e9da064
Author: anoopsjohn <an...@gmail.com>
Authored: Tue Apr 14 11:37:48 2015 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Tue Apr 14 11:37:48 2015 +0530
----------------------------------------------------------------------
.../src/main/java/org/apache/hadoop/hbase/io/TimeRange.java | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
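The one added line short-circuits compare() for the common "all time" range. A sketch of the resulting behavior, assuming the default TimeRange constructor covers [0, Long.MAX_VALUE) and sets the allTime flag:

  TimeRange allTime = new TimeRange();  // assumed: minStamp=0, maxStamp=Long.MAX_VALUE
  // With the bypass, no bound checks are done for the all-time case:
  assert allTime.compare(12345L) == 0;
  // Edge case that changes: previously Long.MAX_VALUE hit "timestamp >= maxStamp" -> 1.
  assert allTime.compare(Long.MAX_VALUE) == 0;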
http://git-wip-us.apache.org/repos/asf/hbase/blob/71536bdc/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
index 8c16389..8352e4e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
@@ -166,6 +166,7 @@ public class TimeRange {
* 1 if timestamp is greater than timerange
*/
public int compare(long timestamp) {
+ if (allTime) return 0;
if (timestamp < minStamp) {
return -1;
} else if (timestamp >= maxStamp) {