Posted to commits@hbase.apache.org by st...@apache.org on 2019/08/25 00:46:22 UTC

[hbase] branch master updated: HBASE-22806 Recreating a deleted column family brings back the deleted cells (#530)

This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new ae107bd  HBASE-22806 Recreating a deleted column family brings back the deleted cells (#530)
ae107bd is described below

commit ae107bdb964e3a9815dea967d1dfd6071b81664f
Author: Pankaj <pa...@huawei.com>
AuthorDate: Sun Aug 25 06:16:16 2019 +0530

    HBASE-22806 Recreating a deleted column family brings back the deleted cells (#530)
    
    Signed-off-by: stack <st...@apache.org>
---
 .../master/procedure/ModifyTableProcedure.java     | 16 +++---
 .../hadoop/hbase/client/TestFromClientSide.java    | 57 ++++++++++++++++++++++
 2 files changed, 65 insertions(+), 8 deletions(-)
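For context, the bug fixed here shows up when a column family is deleted while it still holds cells and is later recreated: the old cells could come back under the recreated family. Below is a hypothetical, standalone client sketch of that scenario (class, table, and family names are invented for illustration, and it assumes a running cluster reachable through the default client configuration); with this fix in place the recreated family should come back empty.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical reproduction of the HBASE-22806 scenario; not part of this patch.
public class Hbase22806Repro {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("repro");
    byte[] cf1 = Bytes.toBytes("cf1");
    byte[] cf2 = Bytes.toBytes("cf2");
    byte[] row = Bytes.toBytes("r1");
    byte[] qual = Bytes.toBytes("q");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // Create a table with two families and write a cell into cf2.
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build());
      try (Table t = conn.getTable(tn)) {
        t.put(new Put(row).addColumn(cf2, qual, Bytes.toBytes("old")));
      }
      // Drop cf2 while its data is still only in the memstore, then recreate it.
      admin.deleteColumnFamily(tn, cf2);
      admin.addColumnFamily(tn, ColumnFamilyDescriptorBuilder.of(cf2));
      try (Table t = conn.getTable(tn)) {
        Result r = t.get(new Get(row).addFamily(cf2));
        // Before this fix the old cell could resurface here; afterwards the
        // recreated family should be empty.
        System.out.println("cf2 after recreate: " + (r.isEmpty() ? "empty" : r.toString()));
      }
    }
  }
}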

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index dd834db..8e435dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -113,14 +113,6 @@ public class ModifyTableProcedure
           break;
         case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
           updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
-          if (deleteColumnFamilyInModify) {
-            setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
-          } else {
-            setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
-          }
-          break;
-        case MODIFY_TABLE_DELETE_FS_LAYOUT:
-          deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
           setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
           break;
         case MODIFY_TABLE_POST_OPERATION:
@@ -131,6 +123,14 @@ public class ModifyTableProcedure
           if (env.getAssignmentManager().isTableEnabled(getTableName())) {
             addChildProcedure(new ReopenTableRegionsProcedure(getTableName()));
           }
+          if (deleteColumnFamilyInModify) {
+            setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
+          } else {
+            return Flow.NO_MORE_STATE;
+          }
+          break;
+        case MODIFY_TABLE_DELETE_FS_LAYOUT:
+          deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
           return Flow.NO_MORE_STATE;
         default:
           throw new UnsupportedOperationException("unhandled state=" + state);
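The hunk above reorders the procedure's state machine: MODIFY_TABLE_DELETE_FS_LAYOUT now runs only after the post-operation step and the region reopen, instead of directly after the replica columns are updated, presumably so that a removed family's directory is not deleted (or rewritten by still-live data) before the regions are running with the new descriptor. A toy sketch of the reordered flow under that reading, not the actual ModifyTableProcedure code:

// Hypothetical illustration of the state order implied by the patch above; the
// enum constants borrow ModifyTableState names, but this class is not part of HBase.
public class ModifyTableFlowSketch {

  enum State { REMOVE_REPLICA_COLUMN, POST_OPERATION, REOPEN_ALL_REGIONS, DELETE_FS_LAYOUT, DONE }

  // Returns the next state; deleteColumnFamilyInModify mirrors the flag used in the patch.
  static State next(State state, boolean deleteColumnFamilyInModify) {
    switch (state) {
      case REMOVE_REPLICA_COLUMN:
        // Before the patch the flow could branch to DELETE_FS_LAYOUT here,
        // i.e. before the regions were reopened with the new descriptor.
        return State.POST_OPERATION;
      case POST_OPERATION:
        return State.REOPEN_ALL_REGIONS;
      case REOPEN_ALL_REGIONS:
        // After the patch, the dropped family's on-disk layout is removed only
        // once the regions are reopened, and only if a family was actually dropped.
        return deleteColumnFamilyInModify ? State.DELETE_FS_LAYOUT : State.DONE;
      case DELETE_FS_LAYOUT:
        return State.DONE;
      default:
        throw new IllegalStateException("unhandled state=" + state);
    }
  }

  public static void main(String[] args) {
    for (State s = State.REMOVE_REPLICA_COLUMN; s != State.DONE; s = next(s, true)) {
      System.out.println(s);
    }
  }
}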
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index ce07dee..1322c5b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -46,6 +46,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -101,6 +102,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge;
 import org.apache.hadoop.hbase.util.TableDescriptorChecker;
 import org.junit.AfterClass;
@@ -6759,4 +6761,59 @@ public class TestFromClientSide {
 
     TEST_UTIL.getAdmin().modifyTable(newDesc);
   }
+
+  @Test(timeout = 60000)
+  public void testModifyTableWithMemstoreData() throws Exception {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    createTableAndValidateTableSchemaModification(tableName, true);
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteCFWithMemstoreData() throws Exception {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    createTableAndValidateTableSchemaModification(tableName, false);
+  }
+
+  /**
+   * Create a table and validate online schema modification
+   * @param tableName table name
+   * @param modifyTable if true modify the table, otherwise delete the column family
+   * @throws Exception in case of failures
+   */
+  private void createTableAndValidateTableSchemaModification(TableName tableName,
+      boolean modifyTable) throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    // Create a table with two CFs
+    byte[] cf1 = Bytes.toBytes("cf1");
+    byte[] cf2 = Bytes.toBytes("cf2");
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1))
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build();
+    admin.createTable(tableDesc);
+
+    Table t = TEST_UTIL.getConnection().getTable(tableName);
+    // Insert a few records and flush the table
+    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val1")));
+    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
+    admin.flush(tableName);
+    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), tableName);
+    List<Path> regionDirs = FSUtils.getRegionDirs(TEST_UTIL.getTestFileSystem(), tableDir);
+    assertTrue(regionDirs.size() == 1);
+    List<Path> familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
+    assertTrue(familyDirs.size() == 2);
+
+    // Insert records but don't flush the table
+    t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val2")));
+    t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2")));
+
+    if (modifyTable) {
+      tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).removeColumnFamily(cf2).build();
+      admin.modifyTable(tableDesc);
+    } else {
+      admin.deleteColumnFamily(tableName, cf2);
+    }
+    // After the table modification or family delete there should be only one CF dir in FS
+    familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0));
+    assertTrue("CF dir count should be 1, but was " + familyDirs.size(), familyDirs.size() == 1);
+  }
 }
\ No newline at end of file