Posted to commits@hbase.apache.org by zh...@apache.org on 2018/05/25 02:12:46 UTC

[07/36] hbase git commit: HBASE-20616 TruncateTableProcedure is stuck in retry loop in TRUNCATE_TABLE_CREATE_FS_LAYOUT state

HBASE-20616 TruncateTableProcedure is stuck in retry loop in TRUNCATE_TABLE_CREATE_FS_LAYOUT state
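
Root cause: the TRUNCATE_TABLE_CREATE_FS_LAYOUT state was not idempotent. If an HDFS failure left a partially created table layout under the master's temp directory, every retry of CreateTableProcedure.createFsLayout tripped over the leftover directories and failed again, so the procedure never advanced past this state. The fix clears any partial layout first (DeleteTableProcedure.deleteFromFs) so that each retry starts from a clean slate; the new test below emulates exactly this partial-failure scenario.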

Signed-off-by: tedyu <yu...@gmail.com>
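
For intuition, here is a minimal self-contained sketch of the failure mode, using plain JDK file APIs rather than HBase code; all names in it are illustrative, not part of the patch. A retried "create the layout" step that does not first remove leftovers from a previous failed attempt will fail the same way on every retry:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class IdempotentLayoutDemo {

  // Stands in for CreateTableProcedure.createFsLayout:
  // fails if anything is left over from an earlier attempt.
  static void createLayout(Path tableDir, int regionCount) throws IOException {
    Files.createDirectory(tableDir); // throws FileAlreadyExistsException on leftovers
    for (int i = 0; i < regionCount; i++) {
      Files.createDirectory(tableDir.resolve("region-" + i));
    }
  }

  // Stands in for DeleteTableProcedure.deleteFromFs:
  // removes whatever partial layout exists.
  static void deleteLayout(Path tableDir) throws IOException {
    if (!Files.exists(tableDir)) {
      return;
    }
    try (Stream<Path> paths = Files.walk(tableDir)) {
      // Delete children before parents.
      paths.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
    }
  }

  public static void main(String[] args) throws IOException {
    Path tableDir = Files.createTempDirectory("truncate-demo").resolve("table");

    // Emulate the HDFS failure: only the first region directory exists.
    Files.createDirectories(tableDir.resolve("region-0"));

    try {
      createLayout(tableDir, 3); // old behavior: every retry fails here
    } catch (IOException e) {
      System.out.println("retry fails on the leftover layout: " + e);
    }

    deleteLayout(tableDir);      // the fix: clear the partial layout first...
    createLayout(tableDir, 3);   // ...then re-creating it succeeds
    System.out.println("retry succeeds after cleanup");
  }
}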


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/554d513f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/554d513f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/554d513f

Branch: refs/heads/HBASE-19064
Commit: 554d513f501490793f794bb6c9d484c379109072
Parents: 1792f54
Author: Toshihiro Suzuki <br...@gmail.com>
Authored: Thu May 24 00:26:01 2018 +0900
Committer: tedyu <yu...@gmail.com>
Committed: Thu May 24 15:16:30 2018 -0700

----------------------------------------------------------------------
 .../procedure/TruncateTableProcedure.java       | 13 ++-
 .../procedure/TestTruncateTableProcedure.java   | 90 ++++++++++++++++++++
 2 files changed, 102 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/554d513f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index 4b2c21f..57ea6e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -121,6 +122,7 @@ public class TruncateTableProcedure
           setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
           break;
         case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
+          DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
           regions = CreateTableProcedure.createFsLayout(env, tableDescriptor, regions);
           CreateTableProcedure.updateTableDescCache(env, getTableName());
           setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
@@ -148,7 +150,8 @@ public class TruncateTableProcedure
       if (isRollbackSupported(state)) {
         setFailure("master-truncate-table", e);
       } else {
-        LOG.warn("Retriable error trying to truncate table=" + getTableName() + " state=" + state, e);
+        LOG.warn("Retriable error trying to truncate table=" + getTableName()
+          + " state=" + state, e);
       }
     }
     return Flow.HAS_MORE_STATE;
@@ -303,4 +306,12 @@ public class TruncateTableProcedure
       cpHost.postCompletedTruncateTableAction(tableName, getUser());
     }
   }
+
+  @VisibleForTesting
+  RegionInfo getFirstRegionInfo() {
+    if (regions == null || regions.isEmpty()) {
+      return null;
+    }
+    return regions.get(0);
+  }
 }
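
The new getFirstRegionInfo() accessor is a test-only hook (hence @VisibleForTesting): it exposes one of the procedure's regions so the test below can pre-create that region's directory under the temp dir and thereby emulate a partially completed layout.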

http://git-wip-us.apache.org/repos/asf/hbase/blob/554d513f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
index acd883d..d6d5421 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -19,18 +19,26 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -38,6 +46,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestTruncateTableProcedure extends TestTableDDLProcedureBase {
@@ -217,4 +226,85 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase {
       UTIL.getConnection(), tableName, 50, splitKeys, families);
     assertEquals(50, UTIL.countRows(tableName));
   }
+
+  @Test
+  public void testOnHDFSFailurePreserveSplits() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    testOnHDFSFailure(tableName, true);
+  }
+
+  @Test
+  public void testOnHDFSFailureNoPreserveSplits() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    testOnHDFSFailure(tableName, false);
+  }
+
+  public static class TruncateTableProcedureOnHDFSFailure extends TruncateTableProcedure {
+
+    private boolean failOnce = false;
+
+    public TruncateTableProcedureOnHDFSFailure() {
+      // Required by the Procedure framework to create the procedure on replay
+      super();
+    }
+
+    public TruncateTableProcedureOnHDFSFailure(final MasterProcedureEnv env, TableName tableName,
+      boolean preserveSplits)
+      throws HBaseIOException {
+      super(env, tableName, preserveSplits);
+    }
+
+    @Override
+    protected Flow executeFromState(MasterProcedureEnv env,
+      MasterProcedureProtos.TruncateTableState state) throws InterruptedException {
+
+      if (!failOnce &&
+        state == MasterProcedureProtos.TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT) {
+        try {
+          // To emulate an HDFS failure, create only the first region directory
+          RegionInfo regionInfo = getFirstRegionInfo();
+          Configuration conf = env.getMasterConfiguration();
+          MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+          Path tempdir = mfs.getTempDir();
+          Path tableDir = FSUtils.getTableDir(tempdir, regionInfo.getTable());
+          Path regionDir = FSUtils.getRegionDir(tableDir, regionInfo);
+          FileSystem fs = FileSystem.get(conf);
+          fs.mkdirs(regionDir);
+
+          failOnce = true;
+          return Flow.HAS_MORE_STATE;
+        } catch (IOException e) {
+          fail("failed to create a region directory: " + e);
+        }
+      }
+
+      return super.executeFromState(env, state);
+    }
+  }
+
+  private void testOnHDFSFailure(TableName tableName, boolean preserveSplits) throws Exception {
+    String[] families = new String[] { "f1", "f2" };
+    byte[][] splitKeys = new byte[][] {
+      Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+    };
+
+    // create a table
+    MasterProcedureTestingUtility.createTable(
+      getMasterProcedureExecutor(), tableName, splitKeys, families);
+
+    // load and verify that there are rows in the table
+    MasterProcedureTestingUtility.loadData(
+      UTIL.getConnection(), tableName, 100, splitKeys, families);
+    assertEquals(100, UTIL.countRows(tableName));
+
+    // disable the table
+    UTIL.getAdmin().disableTable(tableName);
+
+    // truncate the table
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    long procId = ProcedureTestingUtility.submitAndWait(procExec,
+      new TruncateTableProcedureOnHDFSFailure(procExec.getEnvironment(), tableName,
+        preserveSplits));
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+  }
 }
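
The test subclass fails exactly once in TRUNCATE_TABLE_CREATE_FS_LAYOUT: before handing control back to the real executeFromState, it creates only the first region's directory under the master's temp dir, which is precisely the leftover state that used to wedge the procedure in a retry loop. With the deleteFromFs call in place, the next pass through the state clears that directory and the truncate completes, which is what assertProcNotFailed verifies. To run it locally, something like "mvn test -Dtest=TestTruncateTableProcedure" from the hbase-server module should work, assuming a standard Maven setup.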