Posted to commits@hbase.apache.org by te...@apache.org on 2016/08/23 01:40:18 UTC

hbase git commit: HBASE-16255 Backup/Restore IT (Vladimir Rodionov)

Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 cd2712e43 -> 7c6b10ff4


HBASE-16255 Backup/Restore IT (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c6b10ff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c6b10ff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c6b10ff

Branch: refs/heads/HBASE-7912
Commit: 7c6b10ff44404db384987698155d7b96741dd1d8
Parents: cd2712e
Author: tedyu <yu...@gmail.com>
Authored: Mon Aug 22 18:40:10 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Mon Aug 22 18:40:10 2016 -0700

----------------------------------------------------------------------
 .../hbase/IntegrationTestBackupRestore.java     | 293 +++++++++++++++++++
 .../hadoop/hbase/HBaseTestingUtility.java       |  31 +-
 2 files changed, 321 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c6b10ff/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
new file mode 100644
index 0000000..52f1c32
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BackupAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
+import org.apache.hadoop.util.ToolRunner;
+import org.hamcrest.CoreMatchers;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+
+/**
+ * An integration test to detect regressions in HBASE-7912. Creates two tables
+ * with many regions, loads data, takes full and incremental backups, then
+ * restores them and verifies the row counts.
+ * @see <a href="https://issues.apache.org/jira/browse/HBASE-7912">HBASE-7912</a>
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestBackupRestore extends IntegrationTestBase {
+  private static final String CLASS_NAME = IntegrationTestBackupRestore.class.getSimpleName();
+  protected static final Log LOG = LogFactory.getLog(IntegrationTestBackupRestore.class);
+  protected static final TableName TABLE_NAME1 = TableName.valueOf(CLASS_NAME + ".table1");
+  protected static final TableName TABLE_NAME2 = TableName.valueOf(CLASS_NAME + ".table2");
+  protected static final String COLUMN_NAME = "f";
+  protected static final String REGION_COUNT_KEY =
+      String.format("hbase.%s.regions.perRS", CLASS_NAME);
+  protected static final String REGIONSERVER_COUNT_KEY = String.format("hbase.%s.regionServers",
+    CLASS_NAME);
+  protected static final int DEFAULT_REGION_COUNT = 10;
+  protected static final int DEFAULT_REGIONSERVER_COUNT = 5;
+  protected static int regionsCountPerServer;
+  protected static int regionServerCount;
+  protected static final String NB_ROWS_IN_BATCH_KEY =
+      String.format("hbase.%s.rows-in-batch", CLASS_NAME);
+  protected static final int DEFAULT_NB_ROWS_IN_BATCH = 20000;
+  private static int rowsInBatch;
+  private static String BACKUP_ROOT_DIR = "backupIT";
+
+  @Before
+  public void setUp() throws Exception {
+    util = new IntegrationTestingUtility();
+    regionsCountPerServer = util.getConfiguration().getInt(REGION_COUNT_KEY, DEFAULT_REGION_COUNT);
+    regionServerCount =
+        util.getConfiguration().getInt(REGIONSERVER_COUNT_KEY, DEFAULT_REGIONSERVER_COUNT);
+    rowsInBatch = util.getConfiguration().getInt(NB_ROWS_IN_BATCH_KEY, DEFAULT_NB_ROWS_IN_BATCH);
+    LOG.info(String.format("Initializing cluster with %d region servers.", regionServerCount));
+    util.initializeCluster(regionServerCount);
+    LOG.info("Cluster initialized");    
+    util.deleteTableIfAny(TABLE_NAME1);
+    util.deleteTableIfAny(TABLE_NAME2);
+    util.waitTableAvailable(BackupSystemTable.getTableName());
+    LOG.info("Cluster ready");
+  }
+
+
+  @After
+  public void tearDown() throws IOException {
+    LOG.info("Cleaning up after test.");
+    util.deleteTableIfAny(TABLE_NAME1);
+    util.deleteTableIfAny(TABLE_NAME2);
+    cleanUpBackupDir();
+    LOG.info("Restoring cluster.");
+    util.restoreCluster();
+    LOG.info("Cluster restored.");
+  }
+
+  private void cleanUpBackupDir() throws IOException {
+    FileSystem fs = FileSystem.get(util.getConfiguration());
+    fs.delete(new Path(BACKUP_ROOT_DIR), true);
+  }
+
+
+  @Test
+  public void testBackupRestore() throws Exception {
+    // resolve the backup root against the cluster's default filesystem;
+    // plain string concatenation would drop the path separator
+    BACKUP_ROOT_DIR = new Path(util.getConfiguration().get("fs.defaultFS"),
+        BACKUP_ROOT_DIR).toString();
+    createTable(TABLE_NAME1);
+    createTable(TABLE_NAME2);
+    runTest();
+  }
+
+  private void createTable(TableName tableName) throws Exception {
+    long startTime, endTime;
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    HColumnDescriptor[] columns = 
+        new HColumnDescriptor[]{new HColumnDescriptor(COLUMN_NAME)};
+    SplitAlgorithm algo = new RegionSplitter.UniformSplit();
+    LOG.info(String.format("Creating table %s with %d splits.", tableName, 
+      regionsCountPerServer));
+    startTime = System.currentTimeMillis();
+    HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc,
+      columns, algo, regionsCountPerServer);
+    util.waitTableAvailable(tableName);
+    endTime = System.currentTimeMillis();
+    LOG.info(String.format("Pre-split table created successfully in %dms.", 
+      (endTime - startTime)));
+  }
+
+  private void loadData(TableName table, int numRows) throws IOException {
+    Connection conn = util.getConnection();
+    // write numRows random rows into column family 'f' (COLUMN_NAME)
+    HTable t1 = (HTable) conn.getTable(table);
+    util.loadRandomRows(t1, new byte[]{'f'}, 100, numRows);
+    t1.close(); // close() flushes buffered puts and releases the table reference
+  }
+
+  private void runTest() throws IOException {
+    Connection conn = util.getConnection();
+    // #0 - insert some data into tables TABLE_NAME1 and TABLE_NAME2
+    loadData(TABLE_NAME1, rowsInBatch);
+    loadData(TABLE_NAME2, rowsInBatch);
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+    // the backup API is exposed through HBaseAdmin in this branch
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    BackupRequest request = new BackupRequest();
+    request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    assertTrue(checkSucceeded(backupIdFull));
+    // #2 - insert some data to table
+    loadData(TABLE_NAME1, rowsInBatch);
+    loadData(TABLE_NAME2, rowsInBatch);
+    HTable t1 = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(t1), CoreMatchers.equalTo(rowsInBatch * 2));
+    t1.close();
+    HTable t2 = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(t2), CoreMatchers.equalTo(rowsInBatch * 2));
+    t2.close();
+    // #3 - incremental backup for tables
+    tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+    request = new BackupRequest();
+    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+    BackupAdmin client = util.getAdmin().getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull,
+      null, false));
+    // #5.1 - check tables for full restore
+    Admin hAdmin = util.getConnection().getAdmin();
+    assertTrue(hAdmin.tableExists(TABLE_NAME1));
+    assertTrue(hAdmin.tableExists(TABLE_NAME2));
+    hAdmin.close();
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch));
+    hTable.close();
+    hTable = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch));
+    hTable.close();
+    // #6 - restore incremental backup for multiple tables, with overwrite
+    TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
+      tablesRestoreIncMultiple, null, true));
+    hTable = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch * 2));
+    hTable.close();
+    hTable = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch * 2));
+    hTable.close();
+    admin.close();
+    conn.close();
+  }
+
+  protected boolean checkSucceeded(String backupId) throws IOException {
+    BackupInfo status = getBackupContext(backupId);
+    if (status == null) return false;
+    return status.getState() == BackupState.COMPLETE;
+  }
+
+  private BackupInfo getBackupContext(String backupId) throws IOException {
+    try (BackupSystemTable table = new BackupSystemTable(util.getConnection())) {
+      BackupInfo status = table.readBackupInfo(backupId);
+      return status;
+    }
+  }
+
+  /**
+   * Builds a restore request for the given backup image and table set.
+   */
+  public RestoreRequest createRestoreRequest(String backupRootDir, String backupId, boolean check,
+      TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
+    RestoreRequest request = new RestoreRequest();
+    request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check)
+        .setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
+    return request;
+  }
+
+  @Override
+  public void setUpCluster() throws Exception {
+    util = getTestingUtil(getConf());
+    LOG.debug("Initializing/checking cluster has " + regionServerCount + " servers");
+    util.initializeCluster(regionServerCount);
+    LOG.debug("Done initializing/checking cluster");
+  }
+
+  @Override
+  public int runTestFromCommandLine() throws Exception {
+    testBackupRestore();
+    return 0;
+  }
+
+  @Override
+  public TableName getTablename() {
+    return null;
+  }
+
+  @Override
+  protected Set<String> getColumnFamilies() {
+    return null;
+  }
+
+  @Override
+  protected void addOptions() {
+    addOptWithArg(REGIONSERVER_COUNT_KEY, "Total number of region servers. Default: '"
+        + DEFAULT_REGIONSERVER_COUNT + "'");
+    addOptWithArg(REGION_COUNT_KEY, "Number of regions per region server. Default: "
+        + DEFAULT_REGION_COUNT);
+    addOptWithArg(NB_ROWS_IN_BATCH_KEY, "Number of rows to load per table per batch"
+        + " (two batches total). Default: " + DEFAULT_NB_ROWS_IN_BATCH);
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    super.processOptions(cmd);
+    regionsCountPerServer =
+        Integer.parseInt(cmd.getOptionValue(REGION_COUNT_KEY,
+          Integer.toString(DEFAULT_REGION_COUNT)));
+    regionServerCount =
+        Integer.parseInt(cmd.getOptionValue(REGIONSERVER_COUNT_KEY,
+          Integer.toString(DEFAULT_REGIONSERVER_COUNT)));
+    rowsInBatch =
+        Integer.parseInt(cmd.getOptionValue(NB_ROWS_IN_BATCH_KEY,
+          Integer.toString(DEFAULT_NB_ROWS_IN_BATCH)));
+    LOG.info(Objects.toStringHelper("Parsed Options").add(REGION_COUNT_KEY, regionsCountPerServer)
+        .add(REGIONSERVER_COUNT_KEY, regionServerCount).add(NB_ROWS_IN_BATCH_KEY, rowsInBatch)
+        .toString());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    IntegrationTestingUtility.setUseDistributedCluster(conf);
+    int status = ToolRunner.run(conf, new IntegrationTestBackupRestore(), args);
+    System.exit(status);
+  }
+}
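
A minimal sketch of the client-side flow the new test exercises, built only from
the HBASE-7912 branch APIs visible in the patch above (BackupRequest, BackupAdmin,
RestoreRequest). The filesystem URI and table name are placeholders, and the
snippet is illustrative rather than canonical:

    // Sketch only: assumes the backup/restore client APIs from the HBASE-7912 branch.
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupRequest;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.RestoreRequest;
    import org.apache.hadoop.hbase.client.BackupAdmin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import com.google.common.collect.Lists;

    public class BackupRestoreFlowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          BackupAdmin backupAdmin = ((HBaseAdmin) conn.getAdmin()).getBackupAdmin();

          // Take a full backup of one table under a root dir (placeholder URI).
          List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1"));
          BackupRequest backup = new BackupRequest();
          backup.setBackupType(BackupType.FULL).setTableList(tables)
              .setTargetRootDir("hdfs://namenode:8020/backupIT");
          String backupId = backupAdmin.backupTables(backup);

          // Restore that image in place, overwriting the live table.
          RestoreRequest restore = new RestoreRequest();
          restore.setBackupRootDir("hdfs://namenode:8020/backupIT").setBackupId(backupId)
              .setCheck(false).setFromTables(new TableName[] { TableName.valueOf("t1") })
              .setToTables(null).setOverwrite(true);
          backupAdmin.restore(restore);
        }
      }
    }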

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c6b10ff/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 0d830f2..2ab8cb8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -115,6 +115,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -2076,6 +2077,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
   }
 
+  /** Loads {@code totalRows} rows keyed by random byte arrays of length {@code rowSize}. */
+  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
+      throws IOException {
+    Random r = new Random();
+    byte[] row = new byte[rowSize];
+    for (int i = 0; i < totalRows; i++) {
+      r.nextBytes(row);
+      Put put = new Put(row);
+      put.addColumn(f, new byte[]{0}, new byte[]{0});
+      t.put(put);
+    }
+  }
+
   public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
       int replicaId)
       throws IOException {
@@ -3632,7 +3645,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return createPreSplitLoadTestTable(conf, desc, new HColumnDescriptor[] {hcd},
         numRegionsPerServer);
   }
-
+  
   /**
    * Creates a pre-split table for load testing. If the table already exists,
    * logs a warning and continues.
@@ -3640,12 +3653,24 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public static int createPreSplitLoadTestTable(Configuration conf,
       HTableDescriptor desc, HColumnDescriptor[] hcds, int numRegionsPerServer) throws IOException {
+
+    return createPreSplitLoadTestTable(conf, desc, hcds,
+      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
+  }
+
+  /**
+   * Creates a pre-split table for load testing. If the table already exists,
+   * logs a warning and continues.
+   * @return the number of regions the table was split into
+   */
+  public static int createPreSplitLoadTestTable(Configuration conf,
+      HTableDescriptor desc, HColumnDescriptor[] hcds,
+      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
     for (HColumnDescriptor hcd : hcds) {
       if (!desc.hasFamily(hcd.getName())) {
         desc.addFamily(hcd);
       }
     }
-
     int totalNumberOfRegions = 0;
     Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
     Admin admin = unmanagedConnection.getAdmin();
@@ -3664,7 +3689,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
           "pre-splitting table into " + totalNumberOfRegions + " regions " +
           "(regions per server: " + numRegionsPerServer + ")");
 
-      byte[][] splits = new RegionSplitter.HexStringSplit().split(
+      byte[][] splits = splitter.split(
           totalNumberOfRegions);
 
       admin.createTable(desc, splits);
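
The second half of the patch generalizes HBaseTestingUtility's pre-split helper to
accept any RegionSplitter.SplitAlgorithm instead of the hard-wired HexStringSplit.
A short usage sketch (table and family names are placeholders; UniformSplit mirrors
what the integration test above selects):

    // Sketch: pre-split a load-test table with UniformSplit rather than the
    // default HexStringSplit; names here are placeholders.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.RegionSplitter;

    public class PreSplitUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("loadtest"));
        HColumnDescriptor[] families = { new HColumnDescriptor("f") };
        // Ten regions per region server, split uniformly over the binary keyspace.
        int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf, desc,
            families, new RegionSplitter.UniformSplit(), 10);
        System.out.println("Table pre-split into " + regions + " regions");
      }
    }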