Posted to commits@hbase.apache.org by en...@apache.org on 2016/03/22 03:42:13 UTC

[44/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
new file mode 100644
index 0000000..21bf63c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupBoundaryTests extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class);
+
+  /**
+   * Verify that a full backup is correctly created on a single empty table.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupSingleEmpty() throws Exception {
+
+    LOG.info("create full backup image on single table");
+    List<TableName> tables = Lists.newArrayList(table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    LOG.info("Finished Backup");
+    assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that a full backup is correctly created on multiple empty tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupMultipleEmpty() throws Exception {
+    LOG.info("create full backup image on mulitple empty tables");
+
+    List<TableName> tables = Lists.newArrayList(table3, table4);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup fails on a single table that does not exist.
+   * @throws Exception
+   */
+  @Test(expected = DoNotRetryIOException.class)
+  public void testFullBackupSingleDNE() throws Exception {
+
+    LOG.info("test full backup fails on a single table that does not exist");
+    List<TableName> tables = toList("tabledne");
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup fails on multiple tables that do not exist.
+   * @throws Exception
+   */
+  @Test(expected = DoNotRetryIOException.class)
+  public void testFullBackupMultipleDNE() throws Exception {
+
+    LOG.info("test full backup fails on multiple tables that do not exist");
+    List<TableName> tables = toList("table1dne", "table2dne");
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup fails on a table set containing both existing and non-existing tables.
+   * @throws Exception
+   */
+  @Test(expected = DoNotRetryIOException.class)
+  public void testFullBackupMixExistAndDNE() throws Exception {
+    LOG.info("create full backup fails on tableset containing real and fake table");
+
+    List<TableName> tables = toList(table1.getNameAsString(), "tabledne");
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    //assertTrue(checkSucceeded(backupId)); // TODO
+  }
+}
\ No newline at end of file
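
The boundary tests above pin down the client contract: create() either returns a
backup id on success or throws DoNotRetryIOException as soon as any requested
table is missing. A minimal caller sketch under that contract (the table name and
logging are illustrative; getBackupClient(), BACKUP_ROOT_DIR and LOG come from
the TestBackupBase scaffolding these tests extend):

    List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1"));
    try {
      String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
      LOG.info("created backup " + backupId);
    } catch (DoNotRetryIOException e) {
      // Fail fast: at least one table does not exist, so a retry cannot succeed.
      LOG.error("backup request rejected", e);
    }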

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
new file mode 100644
index 0000000..899f53b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupLogCleaner extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
+
+  // Implements all test cases in one test, since the full backup and
+  // incremental backup cases depend on each other
+  @Test
+  public void testBackupLogCleaner() throws Exception {
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
+
+    try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+        BackupSystemTable systemTable = new BackupSystemTable(connection)) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      List<String> swalFiles = convert(walFiles);
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete all files because no backup sessions have been recorded yet
+      assertTrue(Iterables.size(deletable) == walFiles.size());
+
+      systemTable.addWALFiles(swalFiles, "backup");
+      String backupIdFull = getBackupClient().create(BackupType.FULL, tableSetFullList,
+        BACKUP_ROOT_DIR);
+      assertTrue(checkSucceeded(backupIdFull));
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete the WAL files because they were recorded in the hbase:backup table
+      int size = Iterables.size(deletable);
+      assertTrue(size == walFiles.size());
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      LOG.debug("WAL list after full backup");
+      convert(newWalFiles);
+
+      // The new list of WAL files is longer than the previous one,
+      // because a new WAL was opened on each region server after the full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+      // TODO : verify that result files are not walFiles collection
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data into table1 and table2
+      HTable t1 = (HTable) conn.getTable(table1);
+      Put p1;
+      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+        p1 = new Put(Bytes.toBytes("row-t1" + i));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+
+      t1.close();
+
+      HTable t2 = (HTable) conn.getTable(table2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+
+      t2.close();
+
+      // #3 - incremental backup for multiple tables
+
+      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
+      String backupIdIncMultiple =
+          getBackupClient().create(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+
+  private List<String> convert(List<FileStatus> walFiles) {
+    List<String> result = new ArrayList<String>();
+    for (FileStatus fs : walFiles) {
+      LOG.debug("+++WAL: " + fs.getPath().toString());
+      result.add(fs.getPath().toString());
+    }
+    return result;
+  }
+
+  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
+    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
+    FileSystem fs = FileSystem.get(c);
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
+    List<FileStatus> logFiles = new ArrayList<FileStatus>();
+    while (it.hasNext()) {
+      LocatedFileStatus lfs = it.next();
+      if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) {
+        logFiles.add(lfs);
+        LOG.info(lfs);
+      }
+    }
+    return logFiles;
+  }
+
+}
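
BackupLogCleaner is exercised above exactly the way the master drives its
log-cleaner delegates: setConf(), then getDeletableFiles() over the candidate
WALs. Outside the test it would be registered through the master's cleaner
plugin list; a sketch, assuming the standard hbase.master.logcleaner.plugins
wiring applies to this new delegate:

    import java.util.Collections;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;

    public class BackupCleanerWiring {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keep the default TTL cleaner and append the backup-aware delegate.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                + "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");

        BackupLogCleaner cleaner = new BackupLogCleaner();
        cleaner.setConf(conf);
        // WALs already recorded in the hbase:backup table come back as deletable;
        // WALs that a future incremental backup still needs are retained.
        Iterable<FileStatus> deletable =
            cleaner.getDeletableFiles(Collections.<FileStatus>emptyList());
      }
    }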

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
new file mode 100644
index 0000000..2dc31df
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -0,0 +1,350 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupContext;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test cases for hbase:backup API
+ *
+ */
+@Category(MediumTests.class)
+public class TestBackupSystemTable {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  protected static Configuration conf = UTIL.getConfiguration();
+  protected static MiniHBaseCluster cluster;
+  protected static Connection conn;
+  protected BackupSystemTable table;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    cluster = UTIL.startMiniCluster();
+    conn = ConnectionFactory.createConnection(UTIL.getConfiguration());
+  }
+
+  @Before
+  public void before() throws IOException {
+    table = new BackupSystemTable(conn);
+  }
+
+  @After
+  public void after() {
+    if (table != null) {
+      table.close();
+    }
+  }
+
+  @Test
+  public void testUpdateReadDeleteBackupStatus() throws IOException {
+    BackupContext ctx = createBackupContext();
+    table.updateBackupStatus(ctx);
+    BackupContext readCtx = table.readBackupStatus(ctx.getBackupId());
+    assertTrue(compare(ctx, readCtx));
+
+    // try fake backup id
+    readCtx = table.readBackupStatus("fake");
+
+    assertNull(readCtx);
+    // delete backup context
+    table.deleteBackupStatus(ctx.getBackupId());
+    readCtx = table.readBackupStatus(ctx.getBackupId());
+    assertNull(readCtx);
+    cleanBackupTable();
+  }
+
+  @Test
+  public void testWriteReadBackupStartCode() throws IOException {
+    Long code = 100L;
+    table.writeBackupStartCode(code);
+    String readCode = table.readBackupStartCode();
+    assertEquals(code, Long.valueOf(readCode));
+    cleanBackupTable();
+  }
+
+  private void cleanBackupTable() throws IOException {
+    Admin admin = UTIL.getHBaseAdmin();
+    admin.disableTable(BackupSystemTable.getTableName());
+    admin.truncateTable(BackupSystemTable.getTableName(), true);
+    if (admin.isTableDisabled(BackupSystemTable.getTableName())) {
+      admin.enableTable(BackupSystemTable.getTableName());
+    }
+  }
+
+  @Test
+  public void testBackupHistory() throws IOException {
+    int n = 10;
+    List<BackupContext> list = createBackupContextList(n);
+
+    // Load data
+    for (BackupContext bc : list) {
+      // Make sure we set right status
+      bc.setState(BackupState.COMPLETE);
+      table.updateBackupStatus(bc);
+    }
+
+    // Reverse list for comparison
+    Collections.reverse(list);
+    ArrayList<BackupCompleteData> history = table.getBackupHistory();
+    assertTrue(history.size() == n);
+
+    for (int i = 0; i < n; i++) {
+      BackupContext ctx = list.get(i);
+      BackupCompleteData data = history.get(i);
+      assertTrue(compare(ctx, data));
+    }
+
+    cleanBackupTable();
+
+  }
+
+  @Test
+  public void testRegionServerLastLogRollResults() throws IOException {
+    String[] servers = new String[] { "server1", "server2", "server3" };
+    Long[] timestamps = new Long[] { 100L, 102L, 107L };
+
+    for (int i = 0; i < servers.length; i++) {
+      table.writeRegionServerLastLogRollResult(servers[i], timestamps[i]);
+    }
+
+    HashMap<String, Long> result = table.readRegionServerLastLogRollResult();
+    assertTrue(servers.length == result.size());
+    Set<String> keys = result.keySet();
+    String[] keysAsArray = new String[keys.size()];
+    keys.toArray(keysAsArray);
+    Arrays.sort(keysAsArray);
+
+    for (int i = 0; i < keysAsArray.length; i++) {
+      assertEquals(keysAsArray[i], servers[i]);
+      Long ts1 = timestamps[i];
+      Long ts2 = result.get(keysAsArray[i]);
+      assertEquals(ts1, ts2);
+    }
+
+    cleanBackupTable();
+  }
+
+  @Test
+  public void testIncrementalBackupTableSet() throws IOException {
+    TreeSet<TableName> tables1 = new TreeSet<>();
+
+    tables1.add(TableName.valueOf("t1"));
+    tables1.add(TableName.valueOf("t2"));
+    tables1.add(TableName.valueOf("t3"));
+
+    TreeSet<TableName> tables2 = new TreeSet<>();
+
+    tables2.add(TableName.valueOf("t3"));
+    tables2.add(TableName.valueOf("t4"));
+    tables2.add(TableName.valueOf("t5"));
+
+    table.addIncrementalBackupTableSet(tables1);
+    TreeSet<TableName> res1 = (TreeSet<TableName>) table.getIncrementalBackupTableSet();
+    assertTrue(tables1.size() == res1.size());
+    Iterator<TableName> desc1 = tables1.descendingIterator();
+    Iterator<TableName> desc2 = res1.descendingIterator();
+    while (desc1.hasNext()) {
+      assertEquals(desc1.next(), desc2.next());
+    }
+
+    table.addIncrementalBackupTableSet(tables2);
+    TreeSet<TableName> res2 = (TreeSet<TableName>) table.getIncrementalBackupTableSet();
+    assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
+
+    tables1.addAll(tables2);
+
+    desc1 = tables1.descendingIterator();
+    desc2 = res2.descendingIterator();
+
+    while (desc1.hasNext()) {
+      assertEquals(desc1.next(), desc2.next());
+    }
+    cleanBackupTable();
+
+  }
+
+  @Test
+  public void testRegionServerLogTimestampMap() throws IOException {
+    TreeSet<TableName> tables = new TreeSet<>();
+
+    tables.add(TableName.valueOf("t1"));
+    tables.add(TableName.valueOf("t2"));
+    tables.add(TableName.valueOf("t3"));
+
+    HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
+
+    rsTimestampMap.put("rs1", 100L);
+    rsTimestampMap.put("rs2", 101L);
+    rsTimestampMap.put("rs3", 103L);
+
+    table.writeRegionServerLogTimestamp(tables, rsTimestampMap);
+
+    HashMap<TableName, HashMap<String, Long>> result = table.readLogTimestampMap();
+
+    assertTrue(tables.size() == result.size());
+
+    for (TableName t : tables) {
+      HashMap<String, Long> rstm = result.get(t);
+      assertNotNull(rstm);
+      assertEquals(rstm.get("rs1"), new Long(100L));
+      assertEquals(rstm.get("rs2"), new Long(101L));
+      assertEquals(rstm.get("rs3"), new Long(103L));
+    }
+
+    Set<TableName> tables1 = new TreeSet<>();
+
+    tables1.add(TableName.valueOf("t3"));
+    tables1.add(TableName.valueOf("t4"));
+    tables1.add(TableName.valueOf("t5"));
+
+    HashMap<String, Long> rsTimestampMap1 = new HashMap<String, Long>();
+
+    rsTimestampMap1.put("rs1", 200L);
+    rsTimestampMap1.put("rs2", 201L);
+    rsTimestampMap1.put("rs3", 203L);
+
+    table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1);
+
+    result = table.readLogTimestampMap();
+
+    assertTrue(5 == result.size());
+
+    for (TableName t : tables) {
+      HashMap<String, Long> rstm = result.get(t);
+      assertNotNull(rstm);
+      if (t.equals(TableName.valueOf("t3")) == false) {
+        assertEquals(rstm.get("rs1"), new Long(100L));
+        assertEquals(rstm.get("rs2"), new Long(101L));
+        assertEquals(rstm.get("rs3"), new Long(103L));
+      } else {
+        assertEquals(rstm.get("rs1"), new Long(200L));
+        assertEquals(rstm.get("rs2"), new Long(201L));
+        assertEquals(rstm.get("rs3"), new Long(203L));
+      }
+    }
+
+    for (TableName t : tables1) {
+      HashMap<String, Long> rstm = result.get(t);
+      assertNotNull(rstm);
+      assertEquals(rstm.get("rs1"), new Long(200L));
+      assertEquals(rstm.get("rs2"), new Long(201L));
+      assertEquals(rstm.get("rs3"), new Long(203L));
+    }
+
+    cleanBackupTable();
+
+  }
+
+  @Test
+  public void testAddWALFiles() throws IOException {
+    List<String> files =
+        Arrays.asList("hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.1",
+          "hdfs://server/WALs/srv2,102,16666/srv2,102,16666.default.2",
+            "hdfs://server/WALs/srv3,103,17777/srv3,103,17777.default.3");
+    String newFile = "hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.5";
+
+    table.addWALFiles(files, "backup");
+
+    assertTrue(table.checkWALFile(files.get(0)));
+    assertTrue(table.checkWALFile(files.get(1)));
+    assertTrue(table.checkWALFile(files.get(2)));
+    assertFalse(table.checkWALFile(newFile));
+
+    cleanBackupTable();
+  }
+
+  private boolean compare(BackupContext ctx, BackupCompleteData data) {
+
+    return ctx.getBackupId().equals(data.getBackupToken())
+        && ctx.getTargetRootDir().equals(data.getBackupRootPath())
+        && ctx.getType().toString().equals(data.getType())
+        && ctx.getStartTs() == Long.parseLong(data.getStartTime())
+        && ctx.getEndTs() == Long.parseLong(data.getEndTime());
+
+  }
+
+  private boolean compare(BackupContext one, BackupContext two) {
+    return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
+        && one.getTargetRootDir().equals(two.getTargetRootDir())
+        && one.getStartTs() == two.getStartTs() && one.getEndTs() == two.getEndTs();
+  }
+
+  private BackupContext createBackupContext() {
+
+    BackupContext ctxt =
+        new BackupContext("backup_" + System.nanoTime(), BackupType.FULL,
+          new TableName[] {
+              TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") },
+          "/hbase/backup");
+    ctxt.setStartTs(System.currentTimeMillis());
+    ctxt.setEndTs(System.currentTimeMillis() + 1);
+    return ctxt;
+  }
+
+  private List<BackupContext> createBackupContextList(int size) {
+    List<BackupContext> list = new ArrayList<BackupContext>();
+    for (int i = 0; i < size; i++) {
+      list.add(createBackupContext());
+      try {
+        Thread.sleep(10);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
+      }
+    }
+    return list;
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if (cluster != null) cluster.shutdown();
+  }
+}
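
Taken together, these cases document the hbase:backup access pattern:
BackupSystemTable wraps a Connection and persists session state, start codes,
last-log-roll timestamps, incremental table sets and WAL lists. The core
session lifecycle, condensed from the calls exercised above (a sketch; conf is
assumed to point at a running cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupContext;
    import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupSystemTable table = new BackupSystemTable(conn)) {
      BackupContext ctx = new BackupContext("backup_" + System.nanoTime(), BackupType.FULL,
          new TableName[] { TableName.valueOf("t1") }, "/hbase/backup");
      table.updateBackupStatus(ctx);                       // create or update the session row
      BackupContext saved = table.readBackupStatus(ctx.getBackupId()); // read it back by id
      table.deleteBackupStatus(ctx.getBackupId());         // drop the row once obsolete
    }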

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
new file mode 100644
index 0000000..d9bade1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestFullBackup extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestFullBackup.class);
+
+  /**
+   * Verify that a full backup is correctly created on a single table with data.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupSingle() throws Exception {
+    LOG.info("test full backup on a single table with data");
+    List<TableName> tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+  }
+
+  /**
+   * Verify that a full backup is correctly created on multiple tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupMultiple() throws Exception {
+    LOG.info("create full backup image on multiple tables with data");
+    List<TableName> tables = Lists.newArrayList(table1, table2);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that a full backup is correctly created on all tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupAll() throws Exception {
+    LOG.info("create full backup image on all tables");
+    String backupId = getBackupClient().create(BackupType.FULL, null, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
new file mode 100644
index 0000000..e4c4a07
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestFullRestore extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestFullRestore.class);
+
+  /**
+   * Verify that a single table is restored to a new table
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingle() throws Exception {
+
+    LOG.info("test full restore on a single table empty table");
+
+    List<TableName> tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+    hba.close();
+  }
+
+  /**
+   * Verify that multiple tables are restored to new tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultiple() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+    List<TableName> tables = Lists.newArrayList(table2, table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupId, false, false,
+      restore_tableset, tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table2_restore));
+    assertTrue(hba.tableExists(table3_restore));
+    TEST_UTIL.deleteTable(table2_restore);
+    TEST_UTIL.deleteTable(table3_restore);
+    hba.close();
+  }
+
+  /**
+   * Verify that a single table is restored using overwrite
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingleOverwrite() throws Exception {
+
+    LOG.info("test full restore on a single table empty table");
+    List<TableName> tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    TableName[] tableset = new TableName[] { table1 };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, null,
+      true);
+  }
+
+  /**
+   * Verify that multiple tables are restored to new tables using overwrite.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultipleOverwrite() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+
+    List<TableName> tables = Lists.newArrayList(table2, table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupId, false,
+      false, restore_tableset, null, true);
+  }
+
+  /**
+   * Verify that restore fails on a single table that does not exist.
+   * @throws Exception
+   */
+  @Test(expected = IOException.class)
+  public void testFullRestoreSingleDNE() throws Exception {
+
+    LOG.info("test restore fails on a single table that does not exist");
+    List<TableName> tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+      false);
+  }
+
+  /**
+   * Verify that restore fails on multiple tables that do not exist.
+   * @throws Exception
+   */
+  @Test(expected = IOException.class)
+  public void testFullRestoreMultipleDNE() throws Exception {
+
+    LOG.info("test restore fails on multiple tables that do not exist");
+
+    List<TableName> tables = Lists.newArrayList(table2, table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    TableName[] restore_tableset
+      = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupId, false,
+      false, restore_tableset, tablemap, false);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
new file mode 100644
index 0000000..23b1af1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackup extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);
+  // Implements all test cases in one test, since the incremental backup/restore steps depend on each other
+  @Test
+  public void testIncBackupRestore() throws Exception {
+    HBackupFileSystem hbfs;
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tables = Lists.newArrayList(table1, table2, table3, table4);
+    String backupIdFull = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+
+    assertTrue(checkSucceeded(backupIdFull));
+
+    Connection conn = ConnectionFactory.createConnection(conf1);
+    // #2 - insert some data into table1 and table2
+    HTable t1 = (HTable) conn.getTable(table1);
+    Put p1;
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      p1 = new Put(Bytes.toBytes("row-t1" + i));
+      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t1.put(p1);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    t1.close();
+
+    HTable t2 =  (HTable) conn.getTable(table2);
+    Put p2;
+    for (int i = 0; i < 5; i++) {
+      p2 = new Put(Bytes.toBytes("row-t2" + i));
+      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t2.put(p2);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+    t2.close();
+
+    // #3 - incremental backup for multiple tables
+
+
+    tables = Lists.newArrayList(table1, table2, table3);
+    String backupIdIncMultiple = getBackupClient().create(BackupType.INCREMENTAL,
+      tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull =
+        new TableName[] { table1, table2, table3, table4 };
+
+    TableName[] tablesMapFull =
+        new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore };
+
+    hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdFull);
+    RestoreClient client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupIdFull, false, false,
+      tablesRestoreFull,
+      tablesMapFull, false);
+
+    // #5.1 - check tables for full restore
+    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hAdmin.tableExists(table1_restore));
+    assertTrue(hAdmin.tableExists(table2_restore));
+    assertTrue(hAdmin.tableExists(table3_restore));
+    assertTrue(hAdmin.tableExists(table4_restore));
+
+    hAdmin.close();
+
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table3_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table4_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    // #6 - restore incremental backup for multiple tables, with overwrite
+    TableName[] tablesRestoreIncMultiple =
+        new TableName[] { table1, table2, table3 };
+    TableName[] tablesMapIncMultiple =
+        new TableName[] { table1_restore, table2_restore, table3_restore };
+    hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
+    client = getRestoreClient();
+    client.restore(BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+      tablesRestoreIncMultiple, tablesMapIncMultiple, true);
+
+    hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table3_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    // #7 - incremental backup for single, empty table
+
+    tables = toList(table4.getNameAsString());
+    String backupIdIncEmpty =
+        getBackupClient().create(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupIdIncEmpty));
+
+
+    // #8 - restore incremental backup for single empty table, with overwrite
+    TableName[] tablesRestoreIncEmpty = new TableName[] { table4 };
+    TableName[] tablesMapIncEmpty = new TableName[] { table4_restore };
+    hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncEmpty);
+
+    getRestoreClient().restore(BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
+      tablesRestoreIncEmpty,
+      tablesMapIncEmpty, true);
+
+    hTable = (HTable) conn.getTable(table4_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+    conn.close();
+
+  }
+
+}
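
The sequence above is the intended operator flow in miniature: take a full
image, layer incremental images on top, then restore either image by id.
Condensed to its essentials (the two middle boolean arguments to restore() are
passed as false throughout these tests and are not named in this patch; the
final argument is the overwrite flag):

    // Full image first, then an incremental image on top of it.
    String fullId = getBackupClient().create(BackupType.FULL,
        Lists.newArrayList(table1, table2), BACKUP_ROOT_DIR);
    String incId = getBackupClient().create(BackupType.INCREMENTAL,
        Lists.newArrayList(table1, table2), BACKUP_ROOT_DIR);

    TableName[] from = new TableName[] { table1, table2 };
    TableName[] to = new TableName[] { table1_restore, table2_restore };
    // Restore the full image into fresh tables, then replay the incremental
    // image over them with overwrite enabled, mirroring steps #4 and #6 above.
    getRestoreClient().restore(BACKUP_ROOT_DIR, fullId, false, false, from, to, false);
    getRestoreClient().restore(BACKUP_ROOT_DIR, incId, false, false, from, to, true);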

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..035188c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+  /**
+   * Verify that a remote full backup is correctly created on a single table with data.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupRemote() throws Exception {
+
+    LOG.info("test remote full backup on a single table");
+
+    String backupId =
+        getBackupClient().create(BackupType.FULL,
+          Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..6a66a0d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+  /**
+   * Verify that a remote restore on a single table is successful.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreRemote() throws Exception {
+
+    LOG.info("test remote full backup on a single table");
+    String backupId =
+        getBackupClient().create(BackupType.FULL, toList(table1.getNameAsString()),
+          BACKUP_REMOTE_ROOT_DIR);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_REMOTE_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    getRestoreClient().restore(BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset,
+      tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+    hba.close();
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
new file mode 100644
index 0000000..6f9b3c7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRestoreBoundaryTests extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
+
+  /**
+   * Verify that a single empty table is restored to a new table
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingleEmpty() throws Exception {
+    LOG.info("test full restore on a single table empty table");
+    String backupId =
+        getBackupClient().create(BackupType.FULL, toList(table1.getNameAsString()),
+            BACKUP_ROOT_DIR);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    getRestoreClient().restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+      false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+  }
+
+  /**
+   * Verify that multiple empty tables are restored to new tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultipleEmpty() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+
+    List<TableName> tables = toList(table2.getNameAsString(), table3.getNameAsString());
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    getRestoreClient().restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+      tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table2_restore));
+    assertTrue(hba.tableExists(table3_restore));
+    TEST_UTIL.deleteTable(table2_restore);
+    TEST_UTIL.deleteTable(table3_restore);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
index 7620bbb..cd2efad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
@@ -49,7 +49,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager {
   private ProcedureMember member;
 
   @Override
-  public void initialize(RegionServerServices rss) throws KeeperException {
+  public void initialize(RegionServerServices rss) throws IOException {
     this.rss = rss;
     ZooKeeperWatcher zkw = rss.getZooKeeper();
     this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature());