Posted to commits@hbase.apache.org by te...@apache.org on 2018/11/16 21:07:38 UTC

hbase git commit: HBASE-21141 Enable MOB in backup / restore test involving incremental backup

Repository: hbase
Updated Branches:
  refs/heads/master 43a10df70 -> 825e14b68


HBASE-21141 Enable MOB in backup / restore test involving incremental backup

Signed-off-by: tedyu <yu...@gmail.com>
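
For context, the patch adds a MOB-enabled column family ("mob") to table1 before the
full backup is taken, writes rows into it, and verifies those rows after the
incremental restore. A minimal sketch of the MOB setup, assuming the identifiers
table1Desc and TEST_UTIL provided by the test's base class (TestBackupBase); the
5-byte threshold mirrors the patch below:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    // Declare a column family whose larger cell values are stored as MOB (medium objects).
    final byte[] mobName = Bytes.toBytes("mob");
    HColumnDescriptor mobHcd = new HColumnDescriptor(mobName);
    mobHcd.setMobEnabled(true);   // keep qualifying cells in dedicated MOB files
    mobHcd.setMobThreshold(5L);   // values larger than 5 bytes are treated as MOBs
    table1Desc.addFamily(mobHcd);
    // Apply the schema change and wait for it to take effect on the mini cluster.
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);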


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/825e14b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/825e14b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/825e14b6

Branch: refs/heads/master
Commit: 825e14b68eefb232482dbc2a416c9844f03d01c4
Parents: 43a10df
Author: Artem Ervits <ge...@gmail.com>
Authored: Fri Nov 16 13:51:03 2018 -0500
Committer: tedyu <yu...@gmail.com>
Committed: Fri Nov 16 13:07:25 2018 -0800

----------------------------------------------------------------------
 .../hbase/backup/TestIncrementalBackup.java     | 275 ++++++++++---------
 1 file changed, 143 insertions(+), 132 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/825e14b6/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 48e2c5e..6e15238 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -63,7 +63,7 @@ public class TestIncrementalBackup extends TestBackupBase {
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
     provider = "multiwal";
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<>();
     params.add(new Object[] { Boolean.TRUE });
     return params;
   }
@@ -71,146 +71,157 @@ public class TestIncrementalBackup extends TestBackupBase {
   public TestIncrementalBackup(Boolean b) {
   }
 
-  // implement all test cases in 1 test since incremental backup/restore has dependencies
+  // implement all test cases in 1 test since incremental
+  // backup/restore has dependencies
   @Test
   public void TestIncBackupRestore() throws Exception {
-
     int ADD_ROWS = 99;
+
     // #1 - create full backup for all tables
     LOG.info("create full backup image for all tables");
-
     List<TableName> tables = Lists.newArrayList(table1, table2);
     final byte[] fam3Name = Bytes.toBytes("f3");
+    final byte[] mobName = Bytes.toBytes("mob");
+
     table1Desc.addFamily(new HColumnDescriptor(fam3Name));
+    HColumnDescriptor mobHcd = new HColumnDescriptor(mobName);
+    mobHcd.setMobEnabled(true);
+    mobHcd.setMobThreshold(5L);
+    table1Desc.addFamily(mobHcd);
     HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
 
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    int NB_ROWS_FAM3 = 6;
-    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
-
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data to table
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
-    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
-
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
-    t1.close();
-    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
-
-    HTable t2 = (HTable) conn.getTable(table2);
-    Put p2;
-    for (int i = 0; i < 5; i++) {
-      p2 = new Put(Bytes.toBytes("row-t2" + i));
-      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t2.put(p2);
-    }
-
-    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
-    t2.close();
-    LOG.debug("written " + 5 + " rows to " + table2);
-    // split table1
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    List<HRegion> regions = cluster.getRegions(table1);
-
-    byte[] name = regions.get(0).getRegionInfo().getRegionName();
-    long startSplitTime = EnvironmentEdgeManager.currentTime();
-    try {
-      admin.splitRegion(name);
-    } catch (IOException e) {
-      //although split fail, this may not affect following check
-      //In old split without AM2, if region's best split key is not found,
-      //there are not exception thrown. But in current API, exception
-      //will be thrown.
-      LOG.debug("region is not splittable, because " + e);
-    }
-
-    while (!admin.isTableAvailable(table1)) {
-      Thread.sleep(100);
+    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+      int NB_ROWS_FAM3 = 6;
+      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
+      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
+      HBaseAdmin admin = null;
+      admin = (HBaseAdmin) conn.getAdmin();
+      BackupAdminImpl client = new BackupAdminImpl(conn);
+      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+      String backupIdFull = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // #2 - insert some data to table
+      HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
+      Assert.assertEquals(HBaseTestingUtility.countRows(t1),
+              NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
+      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
+
+      // additionally, insert rows to MOB cf
+      int NB_ROWS_MOB = 111;
+      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB).close();
+      LOG.debug("written " + NB_ROWS_MOB + " rows to the MOB enabled CF of " + table1);
+      Assert.assertEquals(HBaseTestingUtility.countRows(t1),
+              NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
+      t1.close();
+
+      HTable t2 = (HTable) conn.getTable(table2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(t2));
+      t2.close();
+      LOG.debug("written " + 5 + " rows to " + table2);
+
+      // split table1
+      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      List<HRegion> regions = cluster.getRegions(table1);
+      byte[] name = regions.get(0).getRegionInfo().getRegionName();
+      long startSplitTime = EnvironmentEdgeManager.currentTime();
+
+      try {
+        admin.splitRegion(name);
+      } catch (IOException e) {
+        // Even if the split fails, the following checks are unaffected; unlike the
+        // old split without AM2, the current API throws an exception, so just log it.
+        LOG.debug("region is not splittable, because " + e);
+      }
+      while (!admin.isTableAvailable(table1)) {
+        Thread.sleep(100);
+      }
+
+      long endSplitTime = EnvironmentEdgeManager.currentTime();
+      // split finished
+      LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
+
+      // #3 - incremental backup for multiple tables
+      tables = Lists.newArrayList(table1, table2);
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String backupIdIncMultiple = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+
+      // add column family f2 to table1
+      final byte[] fam2Name = Bytes.toBytes("f2");
+      table1Desc.addFamily(new HColumnDescriptor(fam2Name));
+
+      // drop column family f3
+      table1Desc.removeFamily(fam3Name);
+      HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
+
+      int NB_ROWS_FAM2 = 7;
+      HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
+      t3.close();
+
+      // Wait for 5 sec to make sure that old WALs were deleted
+      Thread.sleep(5000);
+
+      // #4 - additional incremental backup for multiple tables
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String backupIdIncMultiple2 = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple2));
+
+      // #5 - restore full backup for all tables
+      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
+      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+      LOG.debug("Restoring full " + backupIdFull);
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+                tablesRestoreFull, tablesMapFull, true));
+
+      // #6.1 - check tables for full restore
+      HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+      assertTrue(hAdmin.tableExists(table1_restore));
+      assertTrue(hAdmin.tableExists(table2_restore));
+      hAdmin.close();
+
+      // #6.2 - checking row count of tables for full restore
+      HTable hTable = (HTable) conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtility.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+      hTable.close();
+
+      hTable = (HTable) conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtility.countRows(hTable));
+      hTable.close();
+
+      // #7 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
+      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
+              false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      hTable = (HTable) conn.getTable(table1_restore);
+
+      LOG.debug("After incremental restore: " + hTable.getDescriptor());
+      int countFamName = TEST_UTIL.countRows(hTable, famName);
+      LOG.debug("f1 has " + countFamName + " rows");
+      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+      LOG.debug("f2 has " + countFam2Name + " rows");
+      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+      int countMobName = TEST_UTIL.countRows(hTable, mobName);
+      LOG.debug("mob has " + countMobName + " rows");
+      Assert.assertEquals(countMobName, NB_ROWS_MOB);
+      hTable.close();
+
+      hTable = (HTable) conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(hTable));
+      hTable.close();
+      admin.close();
     }
-
-    long endSplitTime = EnvironmentEdgeManager.currentTime();
-
-    // split finished
-    LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
-
-    // #3 - incremental backup for multiple tables
-    tables = Lists.newArrayList(table1, table2);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple));
-
-    // add column family f2 to table1
-    final byte[] fam2Name = Bytes.toBytes("f2");
-    table1Desc.addFamily(new HColumnDescriptor(fam2Name));
-    // drop column family f3
-    table1Desc.removeFamily(fam3Name);
-    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
-
-    int NB_ROWS_FAM2 = 7;
-    HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
-    t3.close();
-    // Wait for 5 sec to make sure that old WALs were deleted
-    Thread.sleep(5000);
-
-    // #3 - incremental backup for multiple tables
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple2));
-
-    // #4 - restore full backup for all tables
-    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
-
-    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
-
-    LOG.debug("Restoring full " + backupIdFull);
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
-      tablesRestoreFull, tablesMapFull, true));
-
-    // #5.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hAdmin.tableExists(table1_restore));
-    assertTrue(hAdmin.tableExists(table2_restore));
-
-    hAdmin.close();
-
-    // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
-    hTable.close();
-
-    // #6 - restore incremental backup for multiple tables, with overwrite
-    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
-    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
-      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
-
-    hTable = (HTable) conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getDescriptor());
-    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS);
-    LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 5);
-    hTable.close();
-
-    admin.close();
-    conn.close();
-
   }
-
-}
+}
\ No newline at end of file
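
A note on the MOB threshold used in the test (illustrative sketch, not part of the
patch): with setMobThreshold(5L), any cell written to the "mob" family whose value is
larger than 5 bytes is written to separate MOB files at flush time, with only a
reference cell kept in the regular store, which is the code path the incremental
backup and restore are being exercised against here. Reusing the test's conn, table1
and qualName, such a write looks roughly like:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    try (Table t = conn.getTable(table1)) {
      Put p = new Put(Bytes.toBytes("mob-row-1"));
      // The value exceeds the 5-byte threshold, so it becomes a MOB cell on flush.
      p.addColumn(Bytes.toBytes("mob"), qualName, Bytes.toBytes("value-longer-than-5-bytes"));
      t.put(p);
    }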