Posted to commits@hbase.apache.org by te...@apache.org on 2016/12/07 21:08:12 UTC

[1/3] hbase git commit: HBASE-14123 patch v40 (Vladimir)

Repository: hbase
Updated Branches:
  refs/heads/14123 f976dd12f -> 7c1eb6536


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
index 8a01a65..088bebc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -72,7 +71,7 @@ public class RestoreServerUtil {
 
   public static final Log LOG = LogFactory.getLog(RestoreServerUtil.class);
 
-  private final String[] ignoreDirs = { "recovered.edits" };
+  private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
 
   private final long TABLE_AVAILABILITY_WAIT_TIME = 180000;
 
@@ -545,23 +544,6 @@ public class RestoreServerUtil {
    */
   private LoadIncrementalHFiles createLoader(Path tableArchivePath, boolean multipleTables)
       throws IOException {
-    // set configuration for restore:
-    // LoadIncrementalHFile needs more time
-    // <name>hbase.rpc.timeout</name> <value>600000</value>
-    // calculates
-    Integer milliSecInMin = 60000;
-    Integer previousMillis = this.conf.getInt("hbase.rpc.timeout", 0);
-    Integer numberOfFilesInDir =
-        multipleTables ? getMaxNumberOfFilesInSubDir(tableArchivePath) :
-            getNumberOfFilesInDir(tableArchivePath);
-    Integer calculatedMillis = numberOfFilesInDir * milliSecInMin; // 1 minute per file
-    Integer resultMillis = Math.max(calculatedMillis, previousMillis);
-    if (resultMillis > previousMillis) {
-      LOG.info("Setting configuration for restore with LoadIncrementalHFile: "
-          + "hbase.rpc.timeout to " + calculatedMillis / milliSecInMin
-          + " minutes, to handle the number of files in backup " + tableArchivePath);
-      this.conf.setInt("hbase.rpc.timeout", resultMillis);
-    }
 
     // By default, it is 32 and loader will fail if # of files in any region exceed this
     // limit. Bad for snapshot restore.
@@ -632,7 +614,7 @@ public class RestoreServerUtil {
               || HFileLink.isHFileLink(hfile.getName())) {
             continue;
           }
-          HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf);
+          HFile.Reader reader = HFile.createReader(fs, hfile, conf);
           final byte[] first, last;
           try {
             reader.loadFileInfo();

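Two things change in RestoreServerUtil: createLoader() no longer auto-scales hbase.rpc.timeout by the number of HFiles in the backup directory, and HFile.createReader() drops its explicit CacheConfig argument. With the auto-scaling gone, a caller restoring a very large backup may want to raise the timeout itself. A minimal sketch that mirrors the deleted heuristic (one minute per file); the file count here is hypothetical:

    // Sketch only, not part of the patch: pre-size the RPC timeout before
    // bulk loading, the way the removed block in createLoader() used to.
    Configuration conf = HBaseConfiguration.create();
    int hfileCount = 120;                               // hypothetical count
    int perFileMillis = 60000;                          // one minute per file
    int current = conf.getInt("hbase.rpc.timeout", 0);
    conf.setInt("hbase.rpc.timeout", Math.max(current, hfileCount * perFileMillis));
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); // declares throws Exception
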
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat2.java
index a00d390..b54a859 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat2.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -87,7 +86,7 @@ public class HFileInputFormat2 extends FileInputFormat<NullWritable, Cell> {
       Path path = fileSplit.getPath();
       FileSystem fs = path.getFileSystem(conf);
       LOG.info("Initialize HFileRecordReader for {}", path);
-      this.in = HFile.createReader(fs, path, new CacheConfig(conf), conf);
+      this.in = HFile.createReader(fs, path, conf);
 
       // The file info must be loaded before the scanner can be used.
       // This seems like a bug in HBase, but it's easily worked around.

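Same reader change as above: the three-argument HFile.createReader(fs, path, conf) overload replaces the one taking an explicit CacheConfig, presumably building a default cache config from the Configuration internally. For context, the reader life cycle around that call looks roughly like this (HFileScanner#getCell is assumed available on this branch):

    FileSystem fs = path.getFileSystem(conf);
    HFile.Reader reader = HFile.createReader(fs, path, conf);
    try {
      reader.loadFileInfo();                     // must run before scanning
      HFileScanner scanner = reader.getScanner(false, false); // no cache, no pread
      if (scanner.seekTo()) {
        do {
          Cell cell = scanner.getCell();
          // ... consume the cell
        } while (scanner.next());
      }
    } finally {
      reader.close();
    }
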
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index ae4d02e..ed6e2a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -99,7 +99,6 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Multimaps;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 /**
  * Tool to load the output of HFileOutputFormat into an existing table.
  */
@@ -963,7 +962,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
         boolean success = false;
         try {
           LOG.debug("Going to connect to server " + getLocation() + " for row "
-              + Bytes.toStringBinary(getRow()) + " with hfile group " + famPaths);
+              + Bytes.toStringBinary(getRow()) + " with hfile group " +
+              LoadIncrementalHFiles.this.toString(famPaths));
           byte[] regionName = getLocation().getRegionInfo().getRegionName();
           try (Table table = conn.getTable(getTableName())) {
             secureClient = new SecureBulkLoadClient(getConf(), table);
@@ -1031,6 +1031,21 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     }
   }
 
+  private final String toString(List<Pair<byte[], String>> list) {
+    StringBuffer sb = new StringBuffer();
+    sb.append("[");
+    if (list != null) {
+      for (Pair<byte[], String> pair : list) {
+        sb.append("{");
+        sb.append(Bytes.toStringBinary(pair.getFirst()));
+        sb.append(",");
+        sb.append(pair.getSecond());
+        sb.append("}");
+      }
+    }
+    sb.append("]");
+    return sb.toString();
+  }
   private boolean isSecureBulkLoadEndpointAvailable() {
     String classes = getConf().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
     return classes.contains("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");

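The log-message fix replaces the default List#toString of famPaths, where each Pair's byte[] family would print as an array hash, with the new helper that renders families via Bytes.toStringBinary. Illustrative input and output (paths made up):

    List<Pair<byte[], String>> famPaths = new ArrayList<>();
    famPaths.add(new Pair<>(Bytes.toBytes("f1"), "hdfs://nn/staging/f1/hfile-1"));
    famPaths.add(new Pair<>(Bytes.toBytes("f2"), "hdfs://nn/staging/f2/hfile-2"));
    // toString(famPaths) now renders:
    // [{f1,hdfs://nn/staging/f1/hfile-1}{f2,hdfs://nn/staging/f2/hfile-2}]
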
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
index 9d75e2e..189b470 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
@@ -49,10 +49,9 @@ public class ZKProcedureCoordinatorRpcs implements ProcedureCoordinatorRpcs {
    * @param procedureClass procedure type name is a category for when there are multiple kinds of
    *    procedures.-- this becomes a znode so be aware of the naming restrictions
    * @param coordName name of the node running the coordinator
-   * @throws KeeperException if an unexpected zk error occurs
    */
   public ZKProcedureCoordinatorRpcs(ZooKeeperWatcher watcher,
-      String procedureClass, String coordName) throws IOException {
+      String procedureClass, String coordName) {
     this.watcher = watcher;
     this.procedureType = procedureClass;
     this.coordName = coordName;

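The constructor merely stores its arguments, so the throws IOException clause (and the stale @throws KeeperException javadoc) goes away, and call sites lose a checked-exception handler. A sketch, with a hypothetical procedure-type name; ZK interaction presumably begins later, in ProcedureCoordinatorRpcs#start():

    static ZKProcedureCoordinatorRpcs newCoordinatorRpcs(ZooKeeperWatcher watcher,
        String coordName) {
      // no try/catch needed around construction any more
      return new ZKProcedureCoordinatorRpcs(watcher, "rolllog-proc", coordName);
    }
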
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 6ac88be..4ab5ea6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -47,6 +47,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.management.MalformedObjectNameException;
@@ -374,7 +375,7 @@ public class HRegionServer extends HasThread implements
 
   // WAL roller. log is protected rather than private to avoid
   // eclipse warning when accessed by inner classes
-  public final LogRoller walRoller;
+  protected final LogRoller walRoller;
 
   // flag set after we're done setting up server threads
   final AtomicBoolean online = new AtomicBoolean(false);
@@ -1911,6 +1912,10 @@ public class HRegionServer extends HasThread implements
     return wal;
   }
 
+  public LogRoller getWalRoller() {
+    return walRoller;
+  }
+
   @Override
   public Connection getConnection() {
     return getClusterConnection();

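walRoller narrows from public to protected, with a getWalRoller() accessor added, so code outside the regionserver (the backup log-roll procedure, for instance) goes through the getter rather than the field. A sketch; LogRoller#requestRollAll() is assumed to exist on this branch:

    static void requestWalRoll(HRegionServer rs) {
      // accessor instead of the formerly-public field
      rs.getWalRoller().requestRollAll();
    }
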
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index c95fdb0..05f166a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -174,7 +174,7 @@ public class TestBackupBase {
     String backupId;
     try {
       conn = ConnectionFactory.createConnection(conf1);
-      badmin = new HBaseBackupAdmin(conn);
+      badmin = new BackupAdminImpl(conn);
       BackupRequest request = new BackupRequest();
       request.setBackupType(type).setTableList(tables).setTargetRootDir(path);
       backupId = badmin.backupTables(request);
@@ -272,7 +272,7 @@ public class TestBackupBase {
   }
 
   protected BackupAdmin getBackupAdmin() throws IOException {
-    return new HBaseBackupAdmin(TEST_UTIL.getConnection());
+    return new BackupAdminImpl(TEST_UTIL.getConnection());
   }
 
   /**

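HBaseBackupAdmin is renamed to BackupAdminImpl; the call pattern is unchanged. The sequence the test above drives, pulled into one place (conf1, BACKUP_ROOT_DIR, and the BackupAdmin interface are the fixtures the tests already use):

    String fullBackup(List<TableName> tables) throws IOException {
      try (Connection conn = ConnectionFactory.createConnection(conf1)) {
        BackupAdmin badmin = new BackupAdminImpl(conn);
        BackupRequest request = new BackupRequest();
        request.setBackupType(BackupType.FULL).setTableList(tables)
            .setTargetRootDir(BACKUP_ROOT_DIR);
        return badmin.backupTables(request);
      }
    }
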
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
index 3a632fc..af9691a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
@@ -32,6 +32,16 @@ import org.junit.experimental.categories.Category;
 
 @Category(SmallTests.class)
 public class TestBackupCommandLineTool {
+
+  private final static String USAGE_DESCRIBE = "Usage: bin/hbase backup describe <backup_id>";
+  private final static String USAGE_CREATE = "Usage: bin/hbase backup create";
+  private final static String USAGE_HISTORY = "Usage: bin/hbase backup history";
+  private final static String USAGE_BACKUP = "Usage: bin/hbase backup";
+  private final static String USAGE_DELETE = "Usage: bin/hbase backup delete";
+  private final static String USAGE_PROGRESS = "Usage: bin/hbase backup progress";
+  private final static String USAGE_SET = "Usage: bin/hbase backup set";
+  private final static String USAGE_RESTORE = "Usage: bin/hbase restore";
+
   Configuration conf;
   @Before
   public void setUpBefore() throws Exception {
@@ -48,7 +61,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup describe <backupId>") >= 0);
+    assertTrue(output.indexOf(USAGE_DESCRIBE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -57,7 +70,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup describe <backupId>") >= 0);
+    assertTrue(output.indexOf(USAGE_DESCRIBE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -66,7 +79,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup describe <backupId>") >= 0);
+    assertTrue(output.indexOf(USAGE_DESCRIBE) >= 0);
   }
 
   @Test
@@ -78,7 +91,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup create") >= 0);
+    assertTrue(output.indexOf(USAGE_CREATE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -87,7 +100,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup create") >= 0);
+    assertTrue(output.indexOf(USAGE_CREATE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -96,7 +109,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup create") >= 0);
+    assertTrue(output.indexOf(USAGE_CREATE) >= 0);
   }
 
   @Test
@@ -108,7 +121,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup history") >= 0);
+    assertTrue(output.indexOf(USAGE_HISTORY) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -117,7 +130,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup history") >= 0);
+    assertTrue(output.indexOf(USAGE_HISTORY) >= 0);
 
   }
 
@@ -130,7 +143,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup delete") >= 0);
+    assertTrue(output.indexOf(USAGE_DELETE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -139,7 +152,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup delete") >= 0);
+    assertTrue(output.indexOf(USAGE_DELETE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -148,7 +161,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup delete") >= 0);
+    assertTrue(output.indexOf(USAGE_DELETE) >= 0);
   }
 
   @Test
@@ -160,7 +173,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup progress") >= 0);
+    assertTrue(output.indexOf(USAGE_PROGRESS) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -169,7 +182,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup progress") >= 0);
+    assertTrue(output.indexOf(USAGE_PROGRESS) >= 0);
   }
 
   @Test
@@ -181,7 +194,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup set") >= 0);
+    assertTrue(output.indexOf(USAGE_SET) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -190,7 +203,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup set") >= 0);
+    assertTrue(output.indexOf(USAGE_SET) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -199,7 +212,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup set") >= 0);
+    assertTrue(output.indexOf(USAGE_SET) >= 0);
 
   }
 
@@ -212,7 +225,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -221,7 +234,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
   }
 
   @Test
@@ -233,7 +246,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase restore") >= 0);
+    assertTrue(output.indexOf(USAGE_RESTORE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -242,7 +255,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase restore") >= 0);
+    assertTrue(output.indexOf(USAGE_RESTORE) >= 0);
   }
 
   @Test
@@ -254,7 +267,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -263,7 +276,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
   }
 
 
@@ -277,7 +290,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -286,7 +299,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -295,7 +308,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -304,7 +317,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -313,7 +326,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup") >= 0);
+    assertTrue(output.indexOf(USAGE_BACKUP) >= 0);
   }
 
   @Test
@@ -325,7 +338,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase restore") >= 0);
+    assertTrue(output.indexOf(USAGE_RESTORE) >= 0);
 
   }
 
@@ -338,7 +351,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup create") >= 0);
+    assertTrue(output.indexOf(USAGE_CREATE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -347,7 +360,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup create") >= 0);
+    assertTrue(output.indexOf(USAGE_CREATE) >= 0);
 
     baos = new ByteArrayOutputStream();
     System.setOut(new PrintStream(baos));
@@ -356,7 +369,7 @@ public class TestBackupCommandLineTool {
 
     output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup create") >= 0);
+    assertTrue(output.indexOf(USAGE_CREATE) >= 0);
   }
 
   @Test
@@ -368,7 +381,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup delete") >= 0);
+    assertTrue(output.indexOf(USAGE_DELETE) >= 0);
 
   }
 
@@ -381,7 +394,7 @@ public class TestBackupCommandLineTool {
 
     String output = baos.toString();
     System.out.println(baos.toString());
-    assertTrue(output.indexOf("Usage: bin/hbase backup history") >= 0);
+    assertTrue(output.indexOf(USAGE_HISTORY) >= 0);
 
   }
 

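Every test in this class repeats the same stdout-capture dance; the new USAGE_* constants at least deduplicate the expected strings. The capture idiom itself, written as a reusable helper (unlike the tests, it restores the original stream when done):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    static String captureStdout(Runnable body) {
      PrintStream original = System.out;
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      System.setOut(new PrintStream(baos));
      try {
        body.run();
      } finally {
        System.setOut(original);   // the tests leave the stream redirected
      }
      return baos.toString();
    }
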
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
index 6db3bf3..57548fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -57,13 +57,13 @@ public class TestBackupDescribe extends TestBackupBase {
     assertTrue(ret < 0);
 
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    System.setOut(new PrintStream(baos));
+    System.setErr(new PrintStream(baos));
     args = new String[]{"progress" };
     ToolRunner.run(TEST_UTIL.getConfiguration(), new BackupDriver(), args);
 
     String output = baos.toString();
     LOG.info("Output from progress: " + output);
-    assertTrue(output.indexOf(BackupCommands.NO_INFO_FOUND) >= 0);
+    assertTrue(output.indexOf(BackupCommands.NO_ACTIVE_SESSION_FOUND) >= 0);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index 1caba22..f4b9499 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -28,7 +28,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -60,7 +60,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
     admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdmin client = new HBaseBackupAdmin(conn);
+    BackupAdmin client = new BackupAdminImpl(conn);
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
     String backupIdFull = client.backupTables(request);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 9a845ba..38872d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -84,7 +84,7 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     HBaseAdmin admin = null;
     admin = (HBaseAdmin) conn.getAdmin();
-    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
+    BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index 52e247c..973e787 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -25,7 +25,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -63,7 +63,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
     admin = (HBaseAdmin) conn.getAdmin();
-    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
+    BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 8609f7f..2741d7f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -213,17 +213,6 @@ public class MockNoopMasterServices implements MasterServices, Server {
     return null;  //To change body of implemented methods use File | Settings | File Templates.
   }
 
-
-  /*
-   * Restore table set
-   */
-  public long restoreTables(String backupRootDir,
-      String backupId, boolean check, List<TableName> sTableList,
-      List<TableName> tTableList, boolean isOverwrite, long nonceGroup, long nonce)
-          throws IOException {
-    return -1;
-  }
-
   @Override
   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
     return null;  //To change body of implemented methods use File | Settings | File Templates.

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 9adfaee..7c1158e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.hbase.wal.FSHLogProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;


[2/3] hbase git commit: HBASE-14123 patch v40 (Vladimir)

Posted by te...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index ab3f0f6..381cc0f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -32,6 +32,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -53,7 +54,6 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -90,7 +90,8 @@ public final class BackupSystemTable implements Closeable {
 
     @Override
     public String toString() {
-      return "/" + backupRoot + "/" + backupId + "/" + walFile;
+      return Path.SEPARATOR + backupRoot +
+          Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
     }
 
   }
@@ -121,8 +122,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public void updateBackupInfo(BackupInfo context) throws IOException {
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("update backup status in hbase:backup for: " + context.getBackupId()
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("update backup status in hbase:backup for: " + context.getBackupId()
           + " set status=" + context.getState());
     }
     try (Table table = connection.getTable(tableName)) {
@@ -139,8 +140,8 @@ public final class BackupSystemTable implements Closeable {
 
   public void deleteBackupInfo(String backupId) throws IOException {
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("delete backup status in hbase:backup for " + backupId);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("delete backup status in hbase:backup for " + backupId);
     }
     try (Table table = connection.getTable(tableName)) {
       Delete del = BackupSystemTableHelper.createDeleteForBackupInfo(backupId);
@@ -155,8 +156,8 @@ public final class BackupSystemTable implements Closeable {
    */
 
   public BackupInfo readBackupInfo(String backupId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read backup status from hbase:backup for: " + backupId);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("read backup status from hbase:backup for: " + backupId);
     }
 
     try (Table table = connection.getTable(tableName)) {
@@ -178,8 +179,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public String readBackupStartCode(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read backup start code from hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("read backup start code from hbase:backup");
     }
     try (Table table = connection.getTable(tableName)) {
       Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot);
@@ -203,8 +204,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write backup start code to hbase:backup " + startCode);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("write backup start code to hbase:backup " + startCode);
     }
     try (Table table = connection.getTable(tableName)) {
       Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot);
@@ -220,8 +221,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read region server last roll log result to hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("read region server last roll log result to hbase:backup");
     }
 
     Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot);
@@ -252,8 +253,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write region server last roll log result to hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("write region server last roll log result to hbase:backup");
     }
     try (Table table = connection.getTable(tableName)) {
       Put put =
@@ -269,8 +270,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public ArrayList<BackupInfo> getBackupHistory(boolean onlyCompleted) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get backup history from hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("get backup history from hbase:backup");
     }
     ArrayList<BackupInfo> list;
     BackupState state = onlyCompleted ? BackupState.COMPLETE : BackupState.ANY;
@@ -402,8 +403,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public ArrayList<BackupInfo> getBackupContexts(BackupState status) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get backup contexts from hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("get backup contexts from hbase:backup");
     }
 
     Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
@@ -434,8 +435,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public void writeRegionServerLogTimestamp(Set<TableName> tables,
       HashMap<String, Long> newTimestamps, String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("write RS log time stamps to hbase:backup for tables ["
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("write RS log time stamps to hbase:backup for tables ["
           + StringUtils.join(tables, ",") + "]");
     }
     List<Put> puts = new ArrayList<Put>();
@@ -462,8 +463,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public HashMap<TableName, HashMap<String, Long>> readLogTimestampMap(String backupRoot)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("read RS log ts from hbase:backup for root=" + backupRoot);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("read RS log ts from hbase:backup for root=" + backupRoot);
     }
 
     HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
@@ -498,7 +499,8 @@ public final class BackupSystemTable implements Closeable {
       Map<String, Long> map) {
     BackupProtos.TableServerTimestamp.Builder tstBuilder =
         BackupProtos.TableServerTimestamp.newBuilder();
-    tstBuilder.setTable(ProtobufUtil.toProtoTableNameShaded(table));
+    tstBuilder.setTable(
+      org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
 
     for (Entry<String, Long> entry : map.entrySet()) {
       BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder();
@@ -519,7 +521,8 @@ public final class BackupSystemTable implements Closeable {
     HashMap<String, Long> map = new HashMap<String, Long>();
     List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
     for (BackupProtos.ServerTimestamp st : list) {
-      ServerName sn = ProtobufUtil.toServerNameShaded(st.getServer());
+      ServerName sn =
+          org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toServerName(st.getServer());
       map.put(sn.getHostname()+":"+sn.getPort(), st.getTimestamp());
     }
     return map;
@@ -532,8 +535,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public Set<TableName> getIncrementalBackupTableSet(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get incr backup table set from hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("get incremental backup table set from hbase:backup");
     }
     TreeSet<TableName> set = new TreeSet<>();
 
@@ -560,8 +563,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Add incremental backup table set to hbase:backup. ROOT=" + backupRoot
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Add incremental backup table set to hbase:backup. ROOT=" + backupRoot
           + " tables [" + StringUtils.join(tables, " ") + "]");
       for (TableName table : tables) {
         LOG.debug(table);
@@ -579,8 +582,8 @@ public final class BackupSystemTable implements Closeable {
    */
 
   public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Delete incremental backup table set to hbase:backup. ROOT=" + backupRoot);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Delete incremental backup table set to hbase:backup. ROOT=" + backupRoot);
     }
     try (Table table = connection.getTable(tableName)) {
       Delete delete = BackupSystemTableHelper.createDeleteForIncrBackupTableSet(backupRoot);
@@ -597,8 +600,8 @@ public final class BackupSystemTable implements Closeable {
    */
   public void addWALFiles(List<String> files, String backupId, String backupRoot)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("add WAL files to hbase:backup: " + backupId + " " + backupRoot + " files ["
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("add WAL files to hbase:backup: " + backupId + " " + backupRoot + " files ["
           + StringUtils.join(files, ",") + "]");
       for (String f : files) {
         LOG.debug("add :" + f);
@@ -617,8 +620,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public Iterator<WALItem> getWALFilesIterator(String backupRoot) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get WAL files from hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("get WAL files from hbase:backup");
     }
     final Table table = connection.getTable(tableName);
     Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot);
@@ -676,8 +679,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public boolean isWALFileDeletable(String file) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Check if WAL file has been already backed up in hbase:backup " + file);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Check if WAL file has been already backed up in hbase:backup " + file);
     }
     try (Table table = connection.getTable(tableName)) {
       Get get = BackupSystemTableHelper.createGetForCheckWALFile(file);
@@ -696,8 +699,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public boolean hasBackupSessions() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Has backup sessions from hbase:backup");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Has backup sessions from hbase:backup");
     }
     boolean result = false;
     Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
@@ -721,8 +724,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException
    */
   public List<String> listBackupSets() throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set list");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(" Backup set list");
     }
     List<String> list = new ArrayList<String>();
     Table table = null;
@@ -755,8 +758,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException
    */
   public List<TableName> describeBackupSet(String name) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set describe: " + name);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(" Backup set describe: " + name);
     }
     Table table = null;
     try {
@@ -789,8 +792,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException
    */
   public void addToBackupSet(String name, String[] newTables) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]");
     }
     Table table = null;
     String[] union = null;
@@ -836,8 +839,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException
    */
   public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ")
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ")
           + "]");
     }
     Table table = null;
@@ -892,8 +895,8 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException
    */
   public void deleteBackupSet(String name) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" Backup set delete: " + name);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(" Backup set delete: " + name);
     }
     Table table = null;
     try {

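All of the per-operation logging in BackupSystemTable drops from DEBUG to TRACE, presumably to cut log noise, since these messages fire on every backup-meta read and write. The guard idiom being applied, shown with one of the costlier messages:

    if (LOG.isTraceEnabled()) {
      // the join only runs when TRACE is actually enabled
      LOG.trace("write RS log time stamps to hbase:backup for tables ["
          + StringUtils.join(tables, ",") + "]");
    }
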
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
index ddf631d..33ff3ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
@@ -45,7 +45,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public final class BackupSystemTableHelper {
+
+final class BackupSystemTableHelper {
 
   /**
    * hbase:backup schema:

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index 01c004f..f743e75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -19,236 +19,37 @@
 package org.apache.hadoop.hbase.backup.impl;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupCopyTask;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory;
 import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
 import org.apache.hadoop.hbase.backup.util.BackupServerUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 @InterfaceAudience.Private
-public class FullTableBackupClient {
+public class FullTableBackupClient extends TableBackupClient {
   private static final Log LOG = LogFactory.getLog(FullTableBackupClient.class);
 
-  private Configuration conf;
-  private Connection conn;
-  private String backupId;
-  private List<TableName> tableList;
-  HashMap<String, Long> newTimestamps = null;
-
-  private BackupManager backupManager;
-  private BackupInfo backupContext;
-
-  public FullTableBackupClient() {
-    // Required by the Procedure framework to create the procedure on replay
-  }
 
   public FullTableBackupClient(final Connection conn, final String backupId,
       BackupRequest request)
       throws IOException {
-    backupManager = new BackupManager(conn, conn.getConfiguration());
-    this.backupId = backupId;
-    this.tableList = request.getTableList();
-    this.conn = conn;
-    this.conf = conn.getConfiguration();
-    backupContext =
-        backupManager.createBackupContext(backupId, BackupType.FULL, tableList,
-          request.getTargetRootDir(),
-          request.getWorkers(), request.getBandwidth());
-    if (tableList == null || tableList.isEmpty()) {
-      this.tableList = new ArrayList<>(backupContext.getTables());
-    }
-  }
-
-  /**
-   * Begin the overall backup.
-   * @param backupContext backup context
-   * @throws IOException exception
-   */
-  static void beginBackup(BackupManager backupManager, BackupInfo backupContext) throws IOException {
-    backupManager.setBackupContext(backupContext);
-    // set the start timestamp of the overall backup
-    long startTs = EnvironmentEdgeManager.currentTime();
-    backupContext.setStartTs(startTs);
-    // set overall backup status: ongoing
-    backupContext.setState(BackupState.RUNNING);
-    LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + ".");
-
-    backupManager.updateBackupInfo(backupContext);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
-    }
-  }
-
-  private static String getMessage(Exception e) {
-    String msg = e.getMessage();
-    if (msg == null || msg.equals("")) {
-      msg = e.getClass().getName();
-    }
-    return msg;
-  }
-
-  /**
-   * Delete HBase snapshot for backup.
-   * @param backupCtx backup context
-   * @throws Exception exception
-   */
-  private static void
-      deleteSnapshot(final Connection conn, BackupInfo backupCtx, Configuration conf)
-          throws IOException {
-    LOG.debug("Trying to delete snapshot for full backup.");
-    for (String snapshotName : backupCtx.getSnapshotNames()) {
-      if (snapshotName == null) {
-        continue;
-      }
-      LOG.debug("Trying to delete snapshot: " + snapshotName);
-
-      try (Admin admin = conn.getAdmin();) {
-        admin.deleteSnapshot(snapshotName);
-      } catch (IOException ioe) {
-        LOG.debug("when deleting snapshot " + snapshotName, ioe);
-      }
-      LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupCtx.getBackupId()
-          + " succeeded.");
-    }
-  }
-
-  /**
-   * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
-   * snapshots.
-   * @throws IOException exception
-   */
-  private static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
-    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-    Path stagingDir =
-        new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
-            .toString()));
-    FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      if (file.getPath().getName().startsWith("exportSnapshot-")) {
-        LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
-        if (FSUtils.delete(fs, file.getPath(), true) == false) {
-          LOG.warn("Can not delete " + file.getPath());
-        }
-      }
-    }
-  }
-
-  /**
-   * Clean up the uncompleted data at target directory if the ongoing backup has already entered the
-   * copy phase.
-   */
-  static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) {
-    try {
-      // clean up the uncompleted data at target directory if the ongoing backup has already entered
-      // the copy phase
-      LOG.debug("Trying to cleanup up target dir. Current backup phase: "
-          + backupContext.getPhase());
-      if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
-          || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
-          || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
-        FileSystem outputFs =
-            FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
-
-        // now treat one backup as a transaction, clean up data that has been partially copied at
-        // table level
-        for (TableName table : backupContext.getTables()) {
-          Path targetDirPath =
-              new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
-                backupContext.getBackupId(), table));
-          if (outputFs.delete(targetDirPath, true)) {
-            LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
-                + " done.");
-          } else {
-            LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
-          }
-
-          Path tableDir = targetDirPath.getParent();
-          FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
-          if (backups == null || backups.length == 0) {
-            outputFs.delete(tableDir, true);
-            LOG.debug(tableDir.toString() + " is empty, remove it.");
-          }
-        }
-      }
-
-    } catch (IOException e1) {
-      LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
-          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
-    }
-  }
-
-  /**
-   * Fail the overall backup.
-   * @param backupContext backup context
-   * @param e exception
-   * @throws Exception exception
-   */
-  static void failBackup(Connection conn, BackupInfo backupContext, BackupManager backupManager,
-      Exception e, String msg, BackupType type, Configuration conf) throws IOException {
-    LOG.error(msg + getMessage(e), e);
-    // If this is a cancel exception, then we've already cleaned.
-
-    // set the failure timestamp of the overall backup
-    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
-
-    // set failure message
-    backupContext.setFailedMsg(e.getMessage());
-
-    // set overall backup status: failed
-    backupContext.setState(BackupState.FAILED);
-
-    // compose the backup failed data
-    String backupFailedData =
-        "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
-            + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
-            + ",failedmessage=" + backupContext.getFailedMsg();
-    LOG.error(backupFailedData);
-
-    backupManager.updateBackupInfo(backupContext);
-
-    // if full backup, then delete HBase snapshots if there already are snapshots taken
-    // and also clean up export snapshot log files if exist
-    if (type == BackupType.FULL) {
-      deleteSnapshot(conn, backupContext, conf);
-      cleanupExportSnapshotLog(conf);
-    }
-
-    // clean up the uncompleted data at target directory if the ongoing backup has already entered
-    // the copy phase
-    // For incremental backup, DistCp logs will be cleaned with the targetDir.
-    cleanupTargetDir(backupContext, conf);
-
-    LOG.info("Backup " + backupContext.getBackupId() + " failed.");
+    super(conn, backupId, request);
   }
 
   /**
@@ -296,162 +97,10 @@ public class FullTableBackupClient {
   }
 
   /**
-   * Add manifest for the current backup. The manifest is stored within the table backup directory.
-   * @param backupContext The current backup context
-   * @throws IOException exception
-   * @throws BackupException exception
-   */
-  private static void addManifest(BackupInfo backupContext, BackupManager backupManager,
-      BackupType type, Configuration conf) throws IOException, BackupException {
-    // set the overall backup phase : store manifest
-    backupContext.setPhase(BackupPhase.STORE_MANIFEST);
-
-    BackupManifest manifest;
-
-    // Since we have each table's backup in its own directory structure,
-    // we'll store its manifest with the table directory.
-    for (TableName table : backupContext.getTables()) {
-      manifest = new BackupManifest(backupContext, table);
-      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext, table);
-      for (BackupImage image : ancestors) {
-        manifest.addDependentImage(image);
-      }
-
-      if (type == BackupType.INCREMENTAL) {
-        // We'll store the log timestamps for this table only in its manifest.
-        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-            new HashMap<TableName, HashMap<String, Long>>();
-        tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
-        manifest.setIncrTimestampMap(tableTimestampMap);
-        ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupContext);
-        for (BackupImage image : ancestorss) {
-          manifest.addDependentImage(image);
-        }
-      }
-      manifest.store(conf);
-    }
-
-    // For incremental backup, we store a overall manifest in
-    // <backup-root-dir>/WALs/<backup-id>
-    // This is used when created the next incremental backup
-    if (type == BackupType.INCREMENTAL) {
-      manifest = new BackupManifest(backupContext);
-      // set the table region server start and end timestamps for incremental backup
-      manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
-      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext);
-      for (BackupImage image : ancestors) {
-        manifest.addDependentImage(image);
-      }
-      manifest.store(conf);
-    }
-  }
-
-  /**
-   * Get backup request meta data dir as string.
-   * @param backupContext backup context
-   * @return meta data dir
-   */
-  private static String obtainBackupMetaDataStr(BackupInfo backupContext) {
-    StringBuffer sb = new StringBuffer();
-    sb.append("type=" + backupContext.getType() + ",tablelist=");
-    for (TableName table : backupContext.getTables()) {
-      sb.append(table + ";");
-    }
-    if (sb.lastIndexOf(";") > 0) {
-      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
-    }
-    sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
-
-    return sb.toString();
-  }
-
-  /**
-   * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying
-   * hlogs.
-   * @throws IOException exception
-   */
-  private static void cleanupDistCpLog(BackupInfo backupContext, Configuration conf)
-      throws IOException {
-    Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
-    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
-    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
-    if (files == null) {
-      return;
-    }
-    for (FileStatus file : files) {
-      if (file.getPath().getName().startsWith("_distcp_logs")) {
-        LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
-        FSUtils.delete(fs, file.getPath(), true);
-      }
-    }
-  }
-
-  /**
-   * Complete the overall backup.
-   * @param backupContext backup context
-   * @throws Exception exception
-   */
-  static void completeBackup(final Connection conn, BackupInfo backupContext,
-      BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
-    // set the complete timestamp of the overall backup
-    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
-    // set overall backup status: complete
-    backupContext.setState(BackupState.COMPLETE);
-    backupContext.setProgress(100);
-    // add and store the manifest for the backup
-    addManifest(backupContext, backupManager, type, conf);
-
-    // after major steps done and manifest persisted, do convert if needed for incremental backup
-    /* in-fly convert code here, provided by future jira */
-    LOG.debug("in-fly convert code here, provided by future jira");
-
-    // compose the backup complete data
-    String backupCompleteData =
-        obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
-            + ",completets=" + backupContext.getEndTs() + ",bytescopied="
-            + backupContext.getTotalBytesCopied();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
-    }
-    backupManager.updateBackupInfo(backupContext);
-
-    // when full backup is done:
-    // - delete HBase snapshot
-    // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
-    // snapshots
-    if (type == BackupType.FULL) {
-      deleteSnapshot(conn, backupContext, conf);
-      cleanupExportSnapshotLog(conf);
-    } else if (type == BackupType.INCREMENTAL) {
-      cleanupDistCpLog(backupContext, conf);
-    }
-
-    LOG.info("Backup " + backupContext.getBackupId() + " completed.");
-  }
-
-  /**
-   * Wrap a SnapshotDescription for a target table.
-   * @param table table
-   * @return a SnapshotDescription especially for backup.
-   */
-  static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) {
-    // Mock a SnapshotDescription from backupContext to call SnapshotManager function,
-    // Name it in the format "snapshot_<timestamp>_<table>"
-    HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
-    builder.setTable(tableName.getNameAsString());
-    builder.setName(snapshotName);
-    HBaseProtos.SnapshotDescription backupSnapshot = builder.build();
-
-    LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName()
-        + " from backupContext to request snapshot for backup.");
-
-    return backupSnapshot;
-  }
-
-  /**
    * Backup request execution
    * @throws IOException
    */
+  @Override
   public void execute() throws IOException {
 
     try (Admin admin = conn.getAdmin();) {

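For orientation: after this refactor the concrete client is selected by backup type and driven through a single execute() call. A minimal caller sketch, assuming a live Connection 'conn' and a populated BackupRequest 'request'; the backupId format mirrors the one built in HBaseBackupAdmin.backupTables() below:

    // Sketch only: pick the concrete TableBackupClient by backup type and run it.
    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
    TableBackupClient client = request.getBackupType() == BackupType.FULL
        ? new FullTableBackupClient(conn, backupId, request)
        : new IncrementalTableBackupClient(conn, backupId, request);
    client.execute();  // begin -> copy -> complete, or failBackup() on error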
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java
deleted file mode 100644
index 57596c8..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java
+++ /dev/null
@@ -1,555 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Future;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupAdmin;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
-import org.apache.hadoop.hbase.backup.util.BackupSet;
-import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import com.google.common.collect.Lists;
-
-/**
- * The administrative API implementation for HBase Backup . Create an instance from
- * {@link HBaseBackupAdmin#HBaseBackupAdmin(Connection)} and call {@link #close()} afterwards.
- * <p>BackupAdmin can be used to create backups, restore data from backups and for
- * other backup-related operations.
- *
- * @see Admin
- * @since 2.0
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-
-public class HBaseBackupAdmin implements BackupAdmin {
-  private static final Log LOG = LogFactory.getLog(HBaseBackupAdmin.class);
-
-  private final Connection conn;
-
-  public HBaseBackupAdmin(Connection conn) {
-    this.conn = conn;
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-
-  @Override
-  public BackupInfo getBackupInfo(String backupId) throws IOException {
-    BackupInfo backupInfo = null;
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      backupInfo = table.readBackupInfo(backupId);
-      return backupInfo;
-    }
-  }
-
-  @Override
-  public int getProgress(String backupId) throws IOException {
-    BackupInfo backupInfo = null;
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      if (backupId == null) {
-        ArrayList<BackupInfo> recentSessions = table.getBackupContexts(BackupState.RUNNING);
-        if (recentSessions.isEmpty()) {
-          LOG.warn("No ongoing sessions found.");
-          return -1;
-        }
-        // else show status for ongoing session
-        // must be one maximum
-        return recentSessions.get(0).getProgress();
-      } else {
-
-        backupInfo = table.readBackupInfo(backupId);
-        if (backupInfo != null) {
-          return backupInfo.getProgress();
-        } else {
-          LOG.warn("No information found for backupID=" + backupId);
-          return -1;
-        }
-      }
-    }
-  }
-
-  @Override
-  public int deleteBackups(String[] backupIds) throws IOException {
-    // TODO: requires FT, failure will leave system
-    // in non-consistent state
-    // see HBASE-15227
-
-    int totalDeleted = 0;
-    Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>();
-
-    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
-      for (int i = 0; i < backupIds.length; i++) {
-        BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
-        if (info != null) {
-          String rootDir = info.getTargetRootDir();
-          HashSet<TableName> allTables = allTablesMap.get(rootDir);
-          if (allTables == null) {
-            allTables = new HashSet<TableName>();
-            allTablesMap.put(rootDir, allTables);
-          }
-          allTables.addAll(info.getTableNames());
-          totalDeleted += deleteBackup(backupIds[i], sysTable);
-        }
-      }
-      finalizeDelete(allTablesMap, sysTable);
-    }
-    return totalDeleted;
-  }
-
-  /**
-   * Updates incremental backup set for every backupRoot
-   * @param tablesMap - Map [backupRoot: Set<TableName>]
-   * @param table - backup system table
-   * @throws IOException
-   */
-
-  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
-      throws IOException {
-    for (String backupRoot : tablesMap.keySet()) {
-      Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
-      Map<TableName, ArrayList<BackupInfo>> tableMap =
-          table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
-      for(Map.Entry<TableName, ArrayList<BackupInfo>> entry: tableMap.entrySet()) {
-        if(entry.getValue() == null) {
-          // No more backups for a table
-          incrTableSet.remove(entry.getKey());
-        }
-      }
-      if (!incrTableSet.isEmpty()) {
-        table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
-      } else { // empty
-        table.deleteIncrementalBackupTableSet(backupRoot);
-      }
-    }
-  }
-
-  /**
-   * Delete single backup and all related backups
-   * Algorithm:
-   *
-   * Backup type: FULL or INCREMENTAL
-   * Is this last backup session for table T: YES or NO
-   * For every table T from table list 'tables':
-   * if(FULL, YES) deletes only physical data (PD)
-   * if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo, until
-   * we either reach the most recent backup for T in the system or FULL backup which
-   * includes T
-   * if(INCREMENTAL, YES) deletes only physical data (PD)
-   * if(INCREMENTAL, NO) deletes physical data and for table T scans all backup images
-   * between last FULL backup, which is older than the backup being deleted and the next
-   * FULL backup (if exists) or last one for a particular table T and removes T from list
-   * of backup tables.
-   * @param backupId - backup id
-   * @param sysTable - backup system table
-   * @return total - number of deleted backup images
-   * @throws IOException
-   */
-  private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
-
-    BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
-
-    int totalDeleted = 0;
-    if (backupInfo != null) {
-      LOG.info("Deleting backup " + backupInfo.getBackupId() + " ...");
-      BackupClientUtil.cleanupBackupData(backupInfo, conn.getConfiguration());
-      // List of tables in this backup;
-      List<TableName> tables = backupInfo.getTableNames();
-      long startTime = backupInfo.getStartTs();
-      for (TableName tn : tables) {
-        boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
-        if (isLastBackupSession) {
-          continue;
-        }
-        // else
-        List<BackupInfo> affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable);
-        for (BackupInfo info : affectedBackups) {
-          if (info.equals(backupInfo)) {
-            continue;
-          }
-          removeTableFromBackupImage(info, tn, sysTable);
-        }
-      }
-      LOG.debug("Delete backup info "+ backupInfo.getBackupId());
-
-      sysTable.deleteBackupInfo(backupInfo.getBackupId());
-      LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
-      totalDeleted++;
-    } else {
-      LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
-    }
-    return totalDeleted;
-  }
-
-  private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
-      throws IOException {
-    List<TableName> tables = info.getTableNames();
-    LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" +
-      info.getTableListAsString());
-    if (tables.contains(tn)) {
-      tables.remove(tn);
-
-      if (tables.isEmpty()) {
-        LOG.debug("Delete backup info "+ info.getBackupId());
-
-        sysTable.deleteBackupInfo(info.getBackupId());
-        BackupClientUtil.cleanupBackupData(info, conn.getConfiguration());
-      } else {
-        info.setTables(tables);
-        sysTable.updateBackupInfo(info);
-        // Now, clean up directory for table
-        cleanupBackupDir(info, tn, conn.getConfiguration());
-      }
-    }
-  }
-
-  private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn,
-      BackupSystemTable table) throws IOException {
-    LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
-    long ts = backupInfo.getStartTs();
-    List<BackupInfo> list = new ArrayList<BackupInfo>();
-    List<BackupInfo> history = table.getBackupHistory(backupInfo.getTargetRootDir());
-    // Scan from most recent to backupInfo
-    // break when backupInfo reached
-    for (BackupInfo info : history) {
-      if (info.getStartTs() == ts) {
-        break;
-      }
-      List<TableName> tables = info.getTableNames();
-      if (tables.contains(tn)) {
-        BackupType bt = info.getType();
-        if (bt == BackupType.FULL) {
-          // Clear list if we encounter FULL backup
-          list.clear();
-        } else {
-          LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
-              + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
-          list.add(info);
-        }
-      }
-    }
-    return list;
-  }
-
-
-
-  /**
-   * Clean up the data at target directory
-   * @throws IOException
-   */
-  private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
-      throws IOException {
-    try {
-      // clean up the data at target directory
-      String targetDir = backupInfo.getTargetRootDir();
-      if (targetDir == null) {
-        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
-        return;
-      }
-
-      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf);
-
-      Path targetDirPath =
-          new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(),
-            backupInfo.getBackupId(), table));
-      if (outputFs.delete(targetDirPath, true)) {
-        LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
-      } else {
-        LOG.info("No data has been found in " + targetDirPath.toString() + ".");
-      }
-
-    } catch (IOException e1) {
-      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
-          + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
-      throw e1;
-    }
-  }
-
-  private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
-      throws IOException {
-    List<BackupInfo> history = table.getBackupHistory();
-    for (BackupInfo info : history) {
-      List<TableName> tables = info.getTableNames();
-      if (!tables.contains(tn)) {
-        continue;
-      }
-      if (info.getStartTs() <= startTime) {
-        return true;
-      } else {
-        return false;
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public List<BackupInfo> getHistory(int n) throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<BackupInfo> history = table.getBackupHistory();
-      if (history.size() <= n) return history;
-      List<BackupInfo> list = new ArrayList<BackupInfo>();
-      for (int i = 0; i < n; i++) {
-        list.add(history.get(i));
-      }
-      return list;
-    }
-  }
-
-  @Override
-  public List<BackupInfo> getHistory(int n, BackupInfo.Filter ... filters) throws IOException {
-    if (filters.length == 0) return getHistory(n);
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<BackupInfo> history = table.getBackupHistory();
-      List<BackupInfo> result = new ArrayList<BackupInfo>();
-      for(BackupInfo bi: history) {
-        if(result.size() == n) break;
-        boolean passed = true;
-        for(int i=0; i < filters.length; i++) {
-          if(!filters[i].apply(bi)) {
-            passed = false;
-            break;
-          }
-        }
-        if(passed) {
-          result.add(bi);
-        }
-      }
-      return result;
-    }
-  }
-
-  @Override
-  public List<BackupSet> listBackupSets() throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<String> list = table.listBackupSets();
-      List<BackupSet> bslist = new ArrayList<BackupSet>();
-      for (String s : list) {
-        List<TableName> tables = table.describeBackupSet(s);
-        if (tables != null) {
-          bslist.add(new BackupSet(s, tables));
-        }
-      }
-      return bslist;
-    }
-  }
-
-  @Override
-  public BackupSet getBackupSet(String name) throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<TableName> list = table.describeBackupSet(name);
-      if (list == null) return null;
-      return new BackupSet(name, list);
-    }
-  }
-
-  @Override
-  public boolean deleteBackupSet(String name) throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      if (table.describeBackupSet(name) == null) {
-        return false;
-      }
-      table.deleteBackupSet(name);
-      return true;
-    }
-  }
-
-  @Override
-  public void addToBackupSet(String name, TableName[] tables) throws IOException {
-    String[] tableNames = new String[tables.length];
-    try (final BackupSystemTable table = new BackupSystemTable(conn);
-         final Admin admin = conn.getAdmin();) {
-      for (int i = 0; i < tables.length; i++) {
-        tableNames[i] = tables[i].getNameAsString();
-        if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
-          throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist");
-        }
-      }
-      table.addToBackupSet(name, tableNames);
-      LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
-          + "' backup set");
-    }
-  }
-
-  @Override
-  public void removeFromBackupSet(String name, String[] tables) throws IOException {
-    LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      table.removeFromBackupSet(name, tables);
-      LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name
-          + "' completed.");
-    }
-  }
-
-  @Override
-  public void restore(RestoreRequest request) throws IOException {
-    if (request.isCheck()) {
-      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
-      // check and load backup image manifest for the tables
-      Path rootPath = new Path(request.getBackupRootDir());
-      String backupId = request.getBackupId();
-      TableName[] sTableArray = request.getFromTables();
-      HBackupFileSystem.checkImageManifestExist(backupManifestMap,
-        sTableArray, conn.getConfiguration(), rootPath, backupId);
-
-      // Check and validate the backup image and its dependencies
-
-        if (RestoreServerUtil.validate(backupManifestMap, conn.getConfiguration())) {
-          LOG.info("Checking backup images: ok");
-        } else {
-          String errMsg = "Some dependencies are missing for restore";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-
-    }
-    // Execute restore request
-    new RestoreTablesClient(conn, request).execute();
-  }
-
-  @Override
-  public Future<Void> restoreAsync(RestoreRequest request) throws IOException {
-    // TBI
-    return null;
-  }
-
-  @Override
-  public String backupTables(final BackupRequest request) throws IOException {
-    String setName = request.getBackupSetName();
-    BackupType type = request.getBackupType();
-    String targetRootDir = request.getTargetRootDir();
-    List<TableName> tableList = request.getTableList();
-
-    String backupId =
-        (setName == null || setName.length() == 0 ? BackupRestoreConstants.BACKUPID_PREFIX
-            : setName + "_") + EnvironmentEdgeManager.currentTime();
-    if (type == BackupType.INCREMENTAL) {
-      Set<TableName> incrTableSet = null;
-      try (BackupSystemTable table = new BackupSystemTable(conn)) {
-        incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
-      }
-
-      if (incrTableSet.isEmpty()) {
-        System.err.println("Incremental backup table set contains no table.\n"
-            + "Use 'backup create full' or 'backup stop' to \n "
-            + "change the tables covered by incremental backup.");
-        throw new IOException("No table covered by incremental backup.");
-      }
-
-      tableList.removeAll(incrTableSet);
-      if (!tableList.isEmpty()) {
-        String extraTables = StringUtils.join(tableList, ",");
-        System.err.println("Some tables (" + extraTables + ") haven't gone through full backup");
-        throw new IOException("Perform full backup on " + extraTables + " first, "
-            + "then retry the command");
-      }
-      System.out.println("Incremental backup for the following table set: " + incrTableSet);
-      tableList = Lists.newArrayList(incrTableSet);
-    }
-    if (tableList != null && !tableList.isEmpty()) {
-      for (TableName table : tableList) {
-        String targetTableBackupDir =
-            HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
-        Path targetTableBackupDirPath = new Path(targetTableBackupDir);
-        FileSystem outputFs =
-            FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
-        if (outputFs.exists(targetTableBackupDirPath)) {
-          throw new IOException("Target backup directory " + targetTableBackupDir
-              + " exists already.");
-        }
-      }
-      ArrayList<TableName> nonExistingTableList = null;
-      try (Admin admin = conn.getAdmin();) {
-        for (TableName tableName : tableList) {
-          if (!admin.tableExists(tableName)) {
-            if (nonExistingTableList == null) {
-              nonExistingTableList = new ArrayList<>();
-            }
-            nonExistingTableList.add(tableName);
-          }
-        }
-      }
-      if (nonExistingTableList != null) {
-        if (type == BackupType.INCREMENTAL) {
-          System.err.println("Incremental backup table set contains non-exising table: "
-              + nonExistingTableList);
-          // Update incremental backup set
-          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
-        } else {
-          // Throw exception only in full mode - we try to backup non-existing table
-          throw new IOException("Non-existing tables found in the table list: "
-              + nonExistingTableList);
-        }
-      }
-    }
-
-    // update table list
-    request.setTableList(tableList);
-
-    if (type == BackupType.FULL) {
-      new FullTableBackupClient(conn, backupId, request).execute();
-    } else {
-      new IncrementalTableBackupClient(conn, backupId, request).execute();
-    }
-    return backupId;
-  }
-
-
-  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
-      List<TableName> nonExistingTableList) {
-
-    for (TableName table : nonExistingTableList) {
-      tableList.remove(table);
-    }
-    return tableList;
-  }
-
-  @Override
-  public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException {
-    // TBI
-    return null;
-  }
-
-}

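The subtle part of the deleted class is the delete path. A condensed, illustrative restatement of the scan in getAffectedBackupInfos(): walk the history newest-first until the backup being deleted is reached; a newer FULL backup that covers the table resets the affected list because it re-anchors that table's restore chain ('history', 'tn' and 'deletedStartTs' are assumed inputs):

    List<BackupInfo> affected = new ArrayList<>();
    for (BackupInfo info : history) {            // newest first
      if (info.getStartTs() == deletedStartTs) {
        break;                                   // reached the backup being deleted
      }
      if (info.getTableNames().contains(tn)) {
        if (info.getType() == BackupType.FULL) {
          affected.clear();                      // a newer FULL covers tn
        } else {
          affected.add(info);                    // incremental image still references tn
        }
      }
    }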
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 6fad17a..7f41c43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -53,21 +53,16 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class IncrementalBackupManager {
+public class IncrementalBackupManager extends BackupManager {
   public static final Log LOG = LogFactory.getLog(IncrementalBackupManager.class);
 
-  // parent manager
-  private final BackupManager backupManager;
-  private final Configuration conf;
-
-  public IncrementalBackupManager(BackupManager bm) {
-    this.backupManager = bm;
-    this.conf = bm.getConf();
+  public IncrementalBackupManager(Connection conn, Configuration conf) throws IOException {
+    super(conn, conf);
   }
 
   /**
-   * Obtain the list of logs that need to be copied out for this incremental backup.
-   * The list is set in BackupContext.
+   * Obtain the list of logs that need to be copied out for this incremental backup. The list is set
+   * in BackupContext.
    * @param conn the Connection
    * @param backupContext backup context
    * @return The new HashMap of RS log timestamps after the log roll for this incremental backup.
@@ -79,12 +74,11 @@ public class IncrementalBackupManager {
     HashMap<String, Long> newTimestamps;
     HashMap<String, Long> previousTimestampMins;
 
-    String savedStartCode = backupManager.readBackupStartCode();
+    String savedStartCode = readBackupStartCode();
 
     // key: tableName
     // value: <RegionServer,PreviousTimeStamp>
-    HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
-        backupManager.readLogTimestampMap();
+    HashMap<TableName, HashMap<String, Long>> previousTimestampMap = readLogTimestampMap();
 
     previousTimestampMins = BackupServerUtil.getRSLogTimestampMins(previousTimestampMap);
 
@@ -109,12 +103,12 @@ public class IncrementalBackupManager {
         LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
 
     }
-    newTimestamps = backupManager.readRegionServerLastLogRollResult();
+    newTimestamps = readRegionServerLastLogRollResult();
 
     logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode);
     List<WALItem> logFromSystemTable =
         getLogFilesFromBackupSystem(previousTimestampMins,
-      newTimestamps, backupManager.getBackupContext().getTargetRootDir());
+      newTimestamps, getBackupContext().getTargetRootDir());
     addLogsFromBackupSystemToContext(logFromSystemTable);
 
     logList = excludeAlreadyBackedUpWALs(logList, logFromSystemTable);
@@ -164,7 +158,7 @@ public class IncrementalBackupManager {
   private List<WALItem> getLogFilesFromBackupSystem(HashMap<String, Long> olderTimestamps,
       HashMap<String, Long> newestTimestamps, String backupRoot) throws IOException {
     List<WALItem> logFiles = new ArrayList<WALItem>();
-    Iterator<WALItem> it = backupManager.getWALFilesFromBackupSystem();
+    Iterator<WALItem> it = getWALFilesFromBackupSystem();
     while (it.hasNext()) {
       WALItem item = it.next();
       String rootDir = item.getBackupRoot();
@@ -299,7 +293,7 @@ public class IncrementalBackupManager {
        * last backup.
        */
       if (oldTimeStamp == null) {
-        if (currentLogTS < Long.parseLong(savedStartCode)) {
+        if (currentLogTS < Long.valueOf(savedStartCode)) {
           // This log file is really old, its region server was before our last backup.
           continue;
         } else {
@@ -345,7 +339,7 @@ public class IncrementalBackupManager {
       Long timestamp = null;
       try {
         timestamp = BackupClientUtil.getCreationTime(path);
-        return timestamp > lastBackupTS;
+        return timestamp > Long.valueOf(lastBackupTS);
       } catch (Exception e) {
         LOG.warn("Cannot read timestamp of log file " + path);
         return false;

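The last two hunks above encode the WAL selection rule for an incremental backup. A condensed restatement with illustrative names (the general branch sits outside the visible hunks, so treat this as a sketch, not the exact method):

    // lastRollTs: per-region-server timestamp recorded by the previous backup,
    //             or null if the server was not seen then.
    // savedStartCode: start code persisted by the previous backup session.
    static boolean qualifiesForBackup(Long lastRollTs, long logCreationTs, long savedStartCode) {
      if (lastRollTs == null) {
        // New region server: skip only files that predate the whole previous session.
        return logCreationTs >= savedStartCode;
      }
      // Otherwise take files created after the server's last recorded log roll.
      return logCreationTs > lastRollTs;
    }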
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 55be02c..c56afbd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -26,7 +26,6 @@ import java.util.List;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
@@ -44,33 +43,15 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 
 @InterfaceAudience.Private
-public class IncrementalTableBackupClient {
+public class IncrementalTableBackupClient extends TableBackupClient {
   private static final Log LOG = LogFactory.getLog(IncrementalTableBackupClient.class);
 
-  private Configuration conf;
-  private Connection conn;
-  //private String backupId;
-  HashMap<String, Long> newTimestamps = null;
-
-  private String backupId;
-  private BackupManager backupManager;
-  private BackupInfo backupContext;
-
-  public IncrementalTableBackupClient() {
-    // Required by the Procedure framework to create the procedure on replay
-  }
 
   public IncrementalTableBackupClient(final Connection conn, final String backupId,
       BackupRequest request)
-      throws IOException {
-
-    this.conn = conn;
-    this.conf = conn.getConfiguration();
-    backupManager = new BackupManager(conn, conf);
-    this.backupId = backupId;
-    backupContext =
-        backupManager.createBackupContext(backupId, BackupType.INCREMENTAL, request.getTableList(),
-          request.getTargetRootDir(), request.getWorkers(), (int) request.getBandwidth());
+      throws IOException {
+    super(conn, backupId, request);
   }
 
   private List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
@@ -174,19 +155,19 @@ public class IncrementalTableBackupClient {
     return list;
   }
 
+  @Override
   public void execute() throws IOException {
 
     // case PREPARE_INCREMENTAL:
-    FullTableBackupClient.beginBackup(backupManager, backupContext);
+    beginBackup(backupManager, backupContext);
     LOG.debug("For incremental backup, current table set is "
         + backupManager.getIncrementalBackupTableSet());
     try {
-      IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
-
-      newTimestamps = incrBackupManager.getIncrBackupLogFileList(conn, backupContext);
+      newTimestamps = ((IncrementalBackupManager)backupManager).
+          getIncrBackupLogFileList(conn, backupContext);
     } catch (Exception e) {
       // fail the overall backup and return
-      FullTableBackupClient.failBackup(conn, backupContext, backupManager, e,
+      failBackup(conn, backupContext, backupManager, e,
         "Unexpected Exception : ", BackupType.INCREMENTAL, conf);
     }
 
@@ -200,7 +181,7 @@ public class IncrementalTableBackupClient {
     } catch (Exception e) {
       String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId;
       // fail the overall backup and return
-      FullTableBackupClient.failBackup(conn, backupContext, backupManager, e, msg,
+      failBackup(conn, backupContext, backupManager, e, msg,
         BackupType.INCREMENTAL, conf);
     }
     // case INCR_BACKUP_COMPLETE:
@@ -225,11 +206,11 @@ public class IncrementalTableBackupClient {
               .getRSLogTimestampMins(newTableSetTimestampMap));
       backupManager.writeBackupStartCode(newStartCode);
       // backup complete
-      FullTableBackupClient.completeBackup(conn, backupContext, backupManager,
+      completeBackup(conn, backupContext, backupManager,
         BackupType.INCREMENTAL, conf);
 
     } catch (IOException e) {
-      FullTableBackupClient.failBackup(conn, backupContext, backupManager, e,
+      failBackup(conn, backupContext, backupManager, e,
         "Unexpected Exception : ", BackupType.INCREMENTAL, conf);
     }
   }

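The downcast in execute() is safe by construction: the new TableBackupClient base class (added later in this patch) picks the manager implementation from the request's backup type, so an IncrementalTableBackupClient always holds an IncrementalBackupManager. Condensed from that constructor:

    if (request.getBackupType() == BackupType.FULL) {
      backupManager = new BackupManager(conn, conn.getConfiguration());
    } else {
      backupManager = new IncrementalBackupManager(conn, conn.getConfiguration());
    }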
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index 768910f..46fa46f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -51,10 +51,6 @@ public class RestoreTablesClient {
   private String targetRootDir;
   private boolean isOverwrite;
 
-  public RestoreTablesClient() {
-    // Required by the Procedure framework to create the procedure on replay
-  }
-
   public RestoreTablesClient(Connection conn, RestoreRequest request)
       throws IOException {
     this.targetRootDir = request.getBackupRootDir();

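With the replay constructor gone, the only way in is the (Connection, RestoreRequest) constructor. The caller's side, condensed from HBaseBackupAdmin.restore() above (request construction is outside this diff):

    if (request.isCheck()) {
      HashMap<TableName, BackupManifest> manifests = new HashMap<>();
      HBackupFileSystem.checkImageManifestExist(manifests, request.getFromTables(),
        conn.getConfiguration(), new Path(request.getBackupRootDir()), request.getBackupId());
      if (!RestoreServerUtil.validate(manifests, conn.getConfiguration())) {
        throw new IOException("Some dependencies are missing for restore");
      }
    }
    new RestoreTablesClient(conn, request).execute();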
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
new file mode 100644
index 0000000..ce1ed8f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
@@ -0,0 +1,386 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+public abstract class TableBackupClient {
+  private static final Log LOG = LogFactory.getLog(TableBackupClient.class);
+
+  protected Configuration conf;
+  protected Connection conn;
+  protected String backupId;
+  protected List<TableName> tableList;
+  protected HashMap<String, Long> newTimestamps = null;
+
+  protected BackupManager backupManager;
+  protected BackupInfo backupContext;
+
+  public TableBackupClient(final Connection conn, final String backupId,
+      BackupRequest request)
+      throws IOException {
+    if (request.getBackupType() == BackupType.FULL) {
+      backupManager = new BackupManager(conn, conn.getConfiguration());
+    } else {
+      backupManager = new IncrementalBackupManager(conn, conn.getConfiguration());
+    }
+    this.backupId = backupId;
+    this.tableList = request.getTableList();
+    this.conn = conn;
+    this.conf = conn.getConfiguration();
+    backupContext =
+        backupManager.createBackupContext(backupId, request.getBackupType(), tableList,
+          request.getTargetRootDir(),
+          request.getWorkers(), request.getBandwidth());
+    if (tableList == null || tableList.isEmpty()) {
+      this.tableList = new ArrayList<>(backupContext.getTables());
+    }
+  }
+
+  /**
+   * Begin the overall backup.
+   * @param backupContext backup context
+   * @throws IOException exception
+   */
+  protected void beginBackup(BackupManager backupManager, BackupInfo backupContext)
+      throws IOException {
+    backupManager.setBackupContext(backupContext);
+    // set the start timestamp of the overall backup
+    long startTs = EnvironmentEdgeManager.currentTime();
+    backupContext.setStartTs(startTs);
+    // set overall backup status: ongoing
+    backupContext.setState(BackupState.RUNNING);
+    LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + ".");
+
+    backupManager.updateBackupInfo(backupContext);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
+    }
+  }
+
+  private String getMessage(Exception e) {
+    String msg = e.getMessage();
+    if (msg == null || msg.equals("")) {
+      msg = e.getClass().getName();
+    }
+    return msg;
+  }
+
+  /**
+   * Delete HBase snapshot for backup.
+   * @param backupCtx backup context
+   * @throws Exception exception
+   */
+  private void deleteSnapshot(final Connection conn, BackupInfo backupCtx, Configuration conf)
+      throws IOException {
+    LOG.debug("Trying to delete snapshot for full backup.");
+    for (String snapshotName : backupCtx.getSnapshotNames()) {
+      if (snapshotName == null) {
+        continue;
+      }
+      LOG.debug("Trying to delete snapshot: " + snapshotName);
+
+      try (Admin admin = conn.getAdmin()) {
+        admin.deleteSnapshot(snapshotName);
+        LOG.debug("Deleting the snapshot " + snapshotName + " for backup "
+            + backupCtx.getBackupId() + " succeeded.");
+      } catch (IOException ioe) {
+        LOG.debug("Failed to delete snapshot " + snapshotName, ioe);
+      }
+    }
+  }
+
+  /**
+   * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
+   * snapshots.
+   * @throws IOException exception
+   */
+  private void cleanupExportSnapshotLog(Configuration conf) throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    Path stagingDir =
+        new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
+            .toString()));
+    FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
+    if (files == null) {
+      return;
+    }
+    for (FileStatus file : files) {
+      if (file.getPath().getName().startsWith("exportSnapshot-")) {
+        LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
+        if (!FSUtils.delete(fs, file.getPath(), true)) {
+          LOG.warn("Can not delete " + file.getPath());
+        }
+      }
+    }
+  }
+
+  /**
+   * Clean up the uncompleted data at the target directory if the ongoing backup has
+   * already entered the copy phase.
+   */
+  private void cleanupTargetDir(BackupInfo backupContext, Configuration conf) {
+    try {
+      // clean up the uncompleted data at target directory if the ongoing backup has already entered
+      // the copy phase
+      LOG.debug("Trying to cleanup up target dir. Current backup phase: "
+          + backupContext.getPhase());
+      if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
+          || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
+          || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
+        FileSystem outputFs =
+            FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
+
+        // now treat one backup as a transaction, clean up data that has been partially copied at
+        // table level
+        for (TableName table : backupContext.getTables()) {
+          Path targetDirPath =
+              new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
+                backupContext.getBackupId(), table));
+          if (outputFs.delete(targetDirPath, true)) {
+            LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
+                + " done.");
+          } else {
+            LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
+          }
+
+          Path tableDir = targetDirPath.getParent();
+          FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
+          if (backups == null || backups.length == 0) {
+            outputFs.delete(tableDir, true);
+            LOG.debug(tableDir.toString() + " is empty, removing it.");
+          }
+        }
+      }
+
+    } catch (IOException e1) {
+      LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
+          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
+    }
+  }
+
+  /**
+   * Fail the overall backup.
+   * @param backupContext backup context
+   * @param e exception
+   * @throws Exception exception
+   */
+  protected void failBackup(Connection conn, BackupInfo backupContext, BackupManager backupManager,
+      Exception e, String msg, BackupType type, Configuration conf) throws IOException {
+    LOG.error(msg + getMessage(e), e);
+    // If this is a cancel exception, then we've already cleaned.
+
+    // set the failure timestamp of the overall backup
+    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+
+    // set failure message
+    backupContext.setFailedMsg(e.getMessage());
+
+    // set overall backup status: failed
+    backupContext.setState(BackupState.FAILED);
+
+    // compose the backup failed data
+    String backupFailedData =
+        "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
+            + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
+            + ",failedmessage=" + backupContext.getFailedMsg();
+    LOG.error(backupFailedData);
+
+    backupManager.updateBackupInfo(backupContext);
+
+    // if full backup, then delete HBase snapshots if there already are snapshots taken
+    // and also clean up export snapshot log files if exist
+    if (type == BackupType.FULL) {
+      deleteSnapshot(conn, backupContext, conf);
+      cleanupExportSnapshotLog(conf);
+    }
+
+    // clean up the uncompleted data at target directory if the ongoing backup has already entered
+    // the copy phase
+    // For incremental backup, DistCp logs will be cleaned with the targetDir.
+    cleanupTargetDir(backupContext, conf);
+    LOG.info("Backup " + backupContext.getBackupId() + " failed.");
+  }
+
+  /**
+   * Add manifest for the current backup. The manifest is stored within the table backup directory.
+   * @param backupContext The current backup context
+   * @throws IOException exception
+   * @throws BackupException exception
+   */
+  private void addManifest(BackupInfo backupContext, BackupManager backupManager,
+      BackupType type, Configuration conf) throws IOException, BackupException {
+    // set the overall backup phase : store manifest
+    backupContext.setPhase(BackupPhase.STORE_MANIFEST);
+
+    BackupManifest manifest;
+
+    // Since we have each table's backup in its own directory structure,
+    // we'll store its manifest with the table directory.
+    for (TableName table : backupContext.getTables()) {
+      manifest = new BackupManifest(backupContext, table);
+      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext, table);
+      for (BackupImage image : ancestors) {
+        manifest.addDependentImage(image);
+      }
+
+      if (type == BackupType.INCREMENTAL) {
+        // We'll store the log timestamps for this table only in its manifest.
+        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
+            new HashMap<TableName, HashMap<String, Long>>();
+        tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
+        manifest.setIncrTimestampMap(tableTimestampMap);
+        ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupContext);
+        for (BackupImage image : ancestorss) {
+          manifest.addDependentImage(image);
+        }
+      }
+      manifest.store(conf);
+    }
+
+    // For incremental backup, we store an overall manifest in
+    // <backup-root-dir>/WALs/<backup-id>.
+    // This is used when creating the next incremental backup.
+    if (type == BackupType.INCREMENTAL) {
+      manifest = new BackupManifest(backupContext);
+      // set the table region server start and end timestamps for incremental backup
+      manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
+      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext);
+      for (BackupImage image : ancestors) {
+        manifest.addDependentImage(image);
+      }
+      manifest.store(conf);
+    }
+  }
+
+  /**
+   * Get backup request meta data dir as string.
+   * @param backupContext backup context
+   * @return meta data dir
+   */
+  private String obtainBackupMetaDataStr(BackupInfo backupContext) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("type=" + backupContext.getType() + ",tablelist=");
+    for (TableName table : backupContext.getTables()) {
+      sb.append(table + ";");
+    }
+    if (sb.lastIndexOf(";") > 0) {
+      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
+    }
+    sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
+
+    return sb.toString();
+  }
+
+  /**
+   * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying
+   * hlogs.
+   * @throws IOException exception
+   */
+  private void cleanupDistCpLog(BackupInfo backupContext, Configuration conf)
+      throws IOException {
+    Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
+    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
+    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
+    if (files == null) {
+      return;
+    }
+    for (FileStatus file : files) {
+      if (file.getPath().getName().startsWith("_distcp_logs")) {
+        LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
+        FSUtils.delete(fs, file.getPath(), true);
+      }
+    }
+  }
+
+  /**
+   * Complete the overall backup.
+   * @param backupContext backup context
+   * @throws Exception exception
+   */
+  protected void completeBackup(final Connection conn, BackupInfo backupContext,
+      BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
+    // set the complete timestamp of the overall backup
+    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+    // set overall backup status: complete
+    backupContext.setState(BackupState.COMPLETE);
+    backupContext.setProgress(100);
+    // add and store the manifest for the backup
+    addManifest(backupContext, backupManager, type, conf);
+
+    // After major steps are done and the manifest is persisted, do the conversion
+    // if needed for incremental backup.
+    /* in-flight convert code here, provided by future jira */
+    LOG.debug("in-flight convert code here, provided by future jira");
+
+    // compose the backup complete data
+    String backupCompleteData =
+        obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
+            + ",completets=" + backupContext.getEndTs() + ",bytescopied="
+            + backupContext.getTotalBytesCopied();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
+    }
+    backupManager.updateBackupInfo(backupContext);
+
+    // when full backup is done:
+    // - delete HBase snapshot
+    // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
+    // snapshots
+    if (type == BackupType.FULL) {
+      deleteSnapshot(conn, backupContext, conf);
+      cleanupExportSnapshotLog(conf);
+    } else if (type == BackupType.INCREMENTAL) {
+      cleanupDistCpLog(backupContext, conf);
+    }
+    LOG.info("Backup " + backupContext.getBackupId() + " completed.");
+  }
+
+  /**
+   * Backup request execution
+   * @throws IOException
+   */
+  public abstract void execute() throws IOException;
+
+}
+

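TableBackupClient is a template method: the lifecycle helpers (beginBackup, completeBackup, failBackup) are shared, and each concrete client supplies only the phase work in execute(). A hypothetical minimal subclass, for illustration only (NoopTableBackupClient is not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.backup.BackupRequest;
    import org.apache.hadoop.hbase.client.Connection;

    public class NoopTableBackupClient extends TableBackupClient {
      public NoopTableBackupClient(Connection conn, String backupId, BackupRequest request)
          throws IOException {
        super(conn, backupId, request);
      }

      @Override
      public void execute() throws IOException {
        beginBackup(backupManager, backupContext);
        try {
          // phase work goes here: snapshot/export for FULL, WAL copy for INCREMENTAL
          completeBackup(conn, backupContext, backupManager, backupContext.getType(), conf);
        } catch (IOException e) {
          failBackup(conn, backupContext, backupManager, e, "Unexpected Exception : ",
            backupContext.getType(), conf);
        }
      }
    }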
http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
index b942446..e116057 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
@@ -46,14 +46,13 @@ import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.zookeeper.KeeperException.NoNodeException;
+
 /**
  * Copier for backup operation. Basically, there are 2 types of copy. One is copying from snapshot,
- * which bases on extending ExportSnapshot's function with copy progress reporting to ZooKeeper
- * implementation. The other is copying for incremental log files, which bases on extending
- * DistCp's function with copy progress reporting to ZooKeeper implementation.
+ * which is based on extending ExportSnapshot's functionality. The other is copying
+ * incremental log files, which is based on extending DistCp's functionality.
  *
  */
-
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class MapReduceBackupCopyTask implements BackupCopyTask {
@@ -215,7 +214,8 @@ public class MapReduceBackupCopyTask implements BackupCopyTask {
 
         // Update the copy progress to ZK every 0.5s if progress value changed
         int progressReportFreq =
-            this.getConf().getInt("hbase.backup.progressreport.frequency", 500);
+            MapReduceBackupCopyTask.this.getConf()
+              .getInt("hbase.backup.progressreport.frequency", 500);
         float lastProgress = progressDone;
         while (!job.isComplete()) {
           float newProgress =
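
The loop shown above polls the MapReduce job and reports progress at the
configured frequency (500 ms by default). A self-contained sketch of that polling
pattern; the class and method names here are illustrative, not the patch's code:

    import java.io.IOException;
    import org.apache.hadoop.mapreduce.Job;

    public final class ProgressPoller {
      private ProgressPoller() {}

      // Prints progress whenever it changes, sleeping between checks.
      public static void poll(Job job, int reportFreqMillis)
          throws IOException, InterruptedException {
        float last = -1f;
        while (!job.isComplete()) {
          float now = (job.mapProgress() + job.reduceProgress()) / 2f;
          if (now != last) {
            System.out.println("progress=" + (int) (now * 100) + "%");
            last = now;
          }
          Thread.sleep(reportFreqMillis);
        }
      }
    }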

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index dc19f9b..537fed7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -88,7 +88,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
       // If we do not have recorded backup sessions
       try {
         if (!table.hasBackupSessions()) {
-          LOG.debug("BackupLogCleaner has no backup sessions");
+          LOG.trace("BackupLogCleaner has no backup sessions");
           return files;
         }
       } catch (TableNotFoundException tnfe) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
index 99d9d91..d55965c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
@@ -38,10 +38,17 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.zookeeper.KeeperException;
 
+/**
+ * Master procedure manager for the coordinated cluster-wide
+ * WAL roll operation, which is run during a backup operation;
+ * see {@link MasterProcedureManager} and {@link RegionServerProcedureManager}.
+ */
 public class LogRollMasterProcedureManager extends MasterProcedureManager {
 
   public static final String ROLLLOG_PROCEDURE_SIGNATURE = "rolllog-proc";
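
The new javadoc explains that this manager coordinates a cluster-wide WAL roll
during backup. A hedged sketch of how a client could trigger such a procedure via
the Admin API; the instance name and the empty properties map are assumptions for
illustration only:

    import java.io.IOException;
    import java.util.HashMap;
    import org.apache.hadoop.hbase.client.Admin;

    public final class RollLogExample {
      // "rolllog-proc" is ROLLLOG_PROCEDURE_SIGNATURE from the class above.
      public static void rollAllWals(Admin admin) throws IOException {
        admin.execProcedure("rolllog-proc", "rolllog-example-instance",
            new HashMap<String, String>());
      }
    }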

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
index e2c0d69..59bf51c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
@@ -89,9 +89,9 @@ public class LogRollBackupSubprocedure extends Subprocedure {
 
       LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum
           + " highest: " + highest + " on " + rss.getServerName());
-      ((HRegionServer)rss).walRoller.requestRollAll();
+      ((HRegionServer)rss).getWalRoller().requestRollAll();
       long start = EnvironmentEdgeManager.currentTime();
-      while (!((HRegionServer)rss).walRoller.walRollFinished()) {
+      while (!((HRegionServer)rss).getWalRoller().walRollFinished()) {
         Thread.sleep(20);
       }
       LOG.debug("log roll took " + (EnvironmentEdgeManager.currentTime()-start));

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
index 23270fd..838926a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
@@ -36,12 +36,11 @@ import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.zookeeper.KeeperException;
 
 /**
- * This manager class handles the work dealing with backup for a {@link HRegionServer}.
+ * This manager class handles the work dealing with distributed WAL roll request.
  * <p>
  * This provides the mechanism necessary to kick off a backup specific {@link Subprocedure} that is
  * responsible by this region server. If any failures occur with the subprocedure, the manager's


[3/3] hbase git commit: HBASE-14123 patch v40 (Vladimir)

Posted by te...@apache.org.
HBASE-14123 patch v40 (Vladimir)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c1eb653
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c1eb653
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c1eb653

Branch: refs/heads/14123
Commit: 7c1eb65365ac870746a3d422ab4b9491a2fdc8d7
Parents: f976dd1
Author: tedyu <yu...@gmail.com>
Authored: Wed Dec 7 13:08:04 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Dec 7 13:08:04 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  36 --
 .../ClientSnapshotDescriptionUtils.java         |   2 +-
 .../hbase/IntegrationTestBackupRestore.java     |   4 +-
 .../src/main/protobuf/Backup.proto              |  16 +-
 hbase-server/pom.xml                            |   5 -
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  28 +-
 .../hadoop/hbase/backup/BackupCopyTask.java     |   2 +-
 .../hadoop/hbase/backup/BackupDriver.java       |   2 +-
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  18 +-
 .../hadoop/hbase/backup/BackupStatus.java       |   2 +-
 .../hadoop/hbase/backup/HBackupFileSystem.java  |   5 +-
 .../hadoop/hbase/backup/RestoreDriver.java      |  10 +-
 .../hbase/backup/impl/BackupAdminImpl.java      | 556 +++++++++++++++++++
 .../hbase/backup/impl/BackupCommands.java       |  65 ++-
 .../hadoop/hbase/backup/impl/BackupManager.java |  62 +--
 .../hbase/backup/impl/BackupManifest.java       | 218 +++-----
 .../hbase/backup/impl/BackupSystemTable.java    | 103 ++--
 .../backup/impl/BackupSystemTableHelper.java    |   3 +-
 .../backup/impl/FullTableBackupClient.java      | 357 +-----------
 .../hbase/backup/impl/HBaseBackupAdmin.java     | 555 ------------------
 .../backup/impl/IncrementalBackupManager.java   |  30 +-
 .../impl/IncrementalTableBackupClient.java      |  43 +-
 .../hbase/backup/impl/RestoreTablesClient.java  |   4 -
 .../hbase/backup/impl/TableBackupClient.java    | 386 +++++++++++++
 .../mapreduce/MapReduceBackupCopyTask.java      |  10 +-
 .../hbase/backup/master/BackupLogCleaner.java   |   2 +-
 .../master/LogRollMasterProcedureManager.java   |   7 +
 .../regionserver/LogRollBackupSubprocedure.java |   4 +-
 .../LogRollRegionServerProcedureManager.java    |   3 +-
 .../hbase/backup/util/RestoreServerUtil.java    |  22 +-
 .../hbase/mapreduce/HFileInputFormat2.java      |   3 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |  19 +-
 .../procedure/ZKProcedureCoordinatorRpcs.java   |   3 +-
 .../hbase/regionserver/HRegionServer.java       |   7 +-
 .../hadoop/hbase/backup/TestBackupBase.java     |   6 +-
 .../hbase/backup/TestBackupCommandLineTool.java |  79 +--
 .../hadoop/hbase/backup/TestBackupDescribe.java |   4 +-
 .../hbase/backup/TestBackupMultipleDeletes.java |   4 +-
 .../hbase/backup/TestIncrementalBackup.java     |   4 +-
 .../TestIncrementalBackupDeleteTable.java       |   4 +-
 .../hbase/master/MockNoopMasterServices.java    |  11 -
 .../master/TestDistributedLogSplitting.java     |   1 +
 42 files changed, 1319 insertions(+), 1386 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 2cc8fa7..bdc3e54 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -79,7 +78,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -318,28 +316,6 @@ public final class ProtobufUtil {
     return ServerName.valueOf(hostName, port, startCode);
   }
 
-
-  /**
-   * Convert a protocol buffer ServerName to a ServerName
-   *
-   * @param proto the protocol buffer ServerName to convert
-   * @return the converted ServerName
-   */
-  public static ServerName toServerNameShaded(
-      final org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName proto) {
-    if (proto == null) return null;
-    String hostName = proto.getHostName();
-    long startCode = -1;
-    int port = -1;
-    if (proto.hasPort()) {
-      port = proto.getPort();
-    }
-    if (proto.hasStartCode()) {
-      startCode = proto.getStartCode();
-    }
-    return ServerName.valueOf(hostName, port, startCode);
-  }
-
   /**
    * Convert a protobuf Durability into a client Durability
    */
@@ -1694,14 +1670,6 @@ public final class ProtobufUtil {
         tableNamePB.getQualifier().asReadOnlyByteBuffer());
   }
 
-  public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName
-    toProtoTableNameShaded(TableName tableName) {
-    return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder()
-        .setNamespace(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFrom(tableName.getNamespace()))
-        .setQualifier(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFrom(tableName.getQualifier())).build();
-  }
-
-
   /**
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding
    * buffers when working with byte arrays
@@ -1779,10 +1747,6 @@ public final class ProtobufUtil {
     return regionBuilder.build();
   }
 
-  public static BackupProtos.BackupType toProtoBackupType(BackupType type) {
-    return BackupProtos.BackupType.valueOf(type.name());
-  }
-
   /**
    * Get a ServerName from the passed in data bytes.
    * @param data Data with a serialize server name in it; can handle the old style

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
index 7f19cbd..2a58b5a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
@@ -44,7 +44,7 @@ public class ClientSnapshotDescriptionUtils {
       // make sure the table name is valid, this will implicitly check validity
       TableName tableName = TableName.valueOf(snapshot.getTable());
 
-      if (tableName.isSystemTable() && !tableName.toString().equals("hbase:backup")) {
+      if (tableName.isSystemTable() && !TableName.BACKUP_TABLE_NAME.equals(tableName)) {
         // allow hbase:backup table snapshot, but disallow other system  tables
         throw new IllegalArgumentException("System table snapshots are not allowed");
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
index 416ac13..35a09d6 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.backup.BackupRequest;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -162,7 +162,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
     List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
     HBaseAdmin admin = null;
     admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdmin client = new HBaseBackupAdmin(util.getConnection());
+    BackupAdmin client = new BackupAdminImpl(util.getConnection());
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
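
The integration test now drives backups through BackupAdminImpl. A condensed,
self-contained sketch of the same client flow; the table name and the target
path are illustrative:

    import java.util.List;
    import com.google.common.collect.Lists;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.BackupRequest;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FullBackupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1"));
        try (Connection conn = ConnectionFactory.createConnection(conf);
             BackupAdmin admin = new BackupAdminImpl(conn)) {
          BackupRequest request = new BackupRequest();
          request.setBackupType(BackupType.FULL).setTableList(tables)
              .setTargetRootDir("hdfs:///backup"); // illustrative path
          String backupId = admin.backupTables(request); // synchronous
          System.out.println("created " + backupId);
        }
      }
    }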

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-protocol-shaded/src/main/protobuf/Backup.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Backup.proto b/hbase-protocol-shaded/src/main/protobuf/Backup.proto
index b7196ca..7a535d9 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Backup.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Backup.proto
@@ -49,16 +49,15 @@ message BackupImage {
   repeated TableName table_list = 4;
   optional uint64 start_ts = 5;
   optional uint64 complete_ts = 6;
-  repeated BackupImage ancestors = 7; 
+  repeated BackupImage ancestors = 7;
   repeated TableServerTimestamp tst_map = 8;
-  
-}
 
+}
 
 message TableBackupStatus {
   optional TableName table = 1;
   optional string target_dir = 2;
-  optional string snapshot = 3; 	
+  optional string snapshot = 3;
 }
 
 message BackupInfo {
@@ -71,11 +70,11 @@ message BackupInfo {
   repeated TableBackupStatus table_backup_status = 7;
   optional uint64  start_ts = 8;
   optional uint64  end_ts = 9;
-  optional uint32 progress = 10; 
+  optional uint32 progress = 10;
   optional string job_id = 11;
   optional uint32 workers_number = 12;
   optional uint64 bandwidth = 13;
-  
+
   enum BackupState {
     WAITING = 0;
     RUNNING = 1;
@@ -91,6 +90,5 @@ message BackupInfo {
     SNAPSHOTCOPY = 3;
     INCREMENTAL_COPY = 4;
     STORE_MANIFEST = 5;
-  } 
-}
-
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index e6aed8e..57910e7 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -408,11 +408,6 @@
       <version>${hadoop-two.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <version>${hadoop-two.version}</version>
-    </dependency>
-    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
index 0b8de28..f024406 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 public interface BackupAdmin extends Closeable{
 
   /**
-   * Backs up given list of tables fully. Synchronous operation.
+   * Backs up the given list of tables fully. Synchronous operation.
    *
    * @param userRequest BackupRequest instance
    * @return the backup Id
@@ -59,14 +59,14 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Restore backup
-   * @param request - restore request
+   * @param request restore request
    * @throws IOException exception
    */
   public void restore(RestoreRequest request) throws IOException;
 
   /**
    * Restore backup
-   * @param request - restore request
+   * @param request restore request
    * @return Future which client can wait on
    * @throws IOException exception
    */
@@ -74,7 +74,7 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Describe backup image command
-   * @param backupId - backup id
+   * @param backupId backup id
    * @return backup info
    * @throws IOException exception
    */
@@ -82,7 +82,7 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Show backup progress command
-   * @param backupId - backup id (may be null)
+   * @param backupId backup id (may be null)
    * @return backup progress (0-100%), -1 if no active sessions
    *  or session not found
    * @throws IOException exception
@@ -91,7 +91,7 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Delete backup image command
-   * @param backupIds - backup id
+   * @param backupIds backup id list
    * @return total number of deleted sessions
    * @throws IOException exception
    */
@@ -99,7 +99,7 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Show backup history command
-   * @param n - last n backup sessions
+   * @param n last n backup sessions
    * @return list of backup infos
    * @throws IOException exception
    */
@@ -108,8 +108,8 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Show backup history command with filters
-   * @param n - last n backup sessions
-   * @param f - list of filters
+   * @param n last n backup sessions
+   * @param f list of filters
    * @return list of backup infos
    * @throws IOException exception
    */
@@ -135,7 +135,7 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Delete backup set command
-   * @param name - backup set name
+   * @param name backup set name
    * @return true, if success, false - otherwise
    * @throws IOException exception
    */
@@ -143,16 +143,16 @@ public interface BackupAdmin extends Closeable{
 
   /**
    * Add tables to backup set command
-   * @param name - name of backup set.
-   * @param tables - list of tables to be added to this set.
+   * @param name name of backup set.
+   * @param tables list of tables to be added to this set.
    * @throws IOException exception
    */
   public void addToBackupSet(String name, TableName[] tables) throws IOException;
 
   /**
    * Remove tables from backup set
-   * @param name - name of backup set.
-   * @param tables - list of tables to be removed from this set.
+   * @param name name of backup set.
+   * @param tables list of tables to be removed from this set.
    * @throws IOException exception
    */
   public void removeFromBackupSet(String name, String[] tables) throws IOException;
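
The BackupAdmin methods above form the full client surface. A hedged sketch
exercising the monitoring and cleanup calls; it assumes an already-open
BackupAdmin and treats a progress value of 100 as completion:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.BackupInfo;

    public final class MonitoringExample {
      public static void report(BackupAdmin admin, String backupId) throws IOException {
        int progress = admin.getProgress(backupId); // -1 if session not found
        System.out.println(backupId + " progress=" + progress + "%");
        List<BackupInfo> last10 = admin.getHistory(10);
        System.out.println("history size=" + last10.size());
        if (progress == 100) {
          int deleted = admin.deleteBackups(new String[] { backupId });
          System.out.println("deleted=" + deleted);
        }
      }
    }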

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
index 26a7e44..c543062 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
@@ -41,7 +41,7 @@ public interface BackupCopyTask extends Configurable {
    * @throws IOException exception
    */
   int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf,
-      BackupType copyType, String[] options) throws IOException;
+      BackupType backupType, String[] options) throws IOException;
 
 
    /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
index 099e418..fcfd5b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -58,7 +58,7 @@ public class BackupDriver extends AbstractHBaseTool implements BackupRestoreCons
     // Check if backup is enabled
     if (!BackupManager.isBackupEnabled(getConf())) {
       System.err.println("Backup is not enabled. To enable backup, "+
-          "set \'hbase.backup.enabled'=true and restart "+
+          "set " +BackupRestoreConstants.BACKUP_ENABLE_KEY+"=true and restart "+
           "the cluster");
       return -1;
     }
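
The driver now prints the actual configuration key instead of a hard-coded
literal (RestoreDriver below gets the same fix). A hedged sketch of enabling
backup programmatically for a local test; it assumes BACKUP_ENABLE_KEY is the
boolean key that BackupManager.isBackupEnabled() checks (on a real cluster the
key is set in hbase-site.xml followed by a restart, as the message says):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupRestoreConstants;

    public final class EnableBackupExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
        System.out.println("backup enabled: "
            + conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, false));
      }
    }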

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
index 4ea0299..0f861a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
@@ -176,7 +176,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
     this.addTables(tables);
 
     if (type == BackupType.INCREMENTAL) {
-      setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId));
+      setHLogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId));
     }
 
     this.startTs = 0;
@@ -220,10 +220,6 @@ public class BackupInfo implements Comparable<BackupInfo> {
     this.tableSetTimestampMap = tableSetTimestampMap;
   }
 
-  public String getHlogTargetDir() {
-    return hlogTargetDir;
-  }
-
   public void setType(BackupType type) {
     this.type = type;
   }
@@ -355,7 +351,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
     return targetRootDir;
   }
 
-  public void setHlogTargetDir(String hlogTagetDir) {
+  public void setHLogTargetDir(String hlogTagetDir) {
     this.hlogTargetDir = hlogTagetDir;
   }
 
@@ -488,7 +484,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
       context.setState(BackupInfo.BackupState.valueOf(proto.getState().name()));
     }
 
-    context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(),
+    context.setHLogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(),
       proto.getBackupId()));
 
     if (proto.hasPhase()) {
@@ -537,7 +533,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
       date = cal.getTime();
       sb.append("End time       : " + date).append("\n");
     }
-    sb.append("Progress       : " + getProgress()).append("\n");
+    sb.append("Progress       : " + getProgress()+"%").append("\n");
     return sb.toString();
   }
 
@@ -549,7 +545,11 @@ public class BackupInfo implements Comparable<BackupInfo> {
   }
 
   public String getTableListAsString() {
-    return StringUtils.join(backupStatusMap.keySet(), ",");
+    StringBuilder sb = new StringBuilder();
+    sb.append("{");
+    sb.append(StringUtils.join(backupStatusMap.keySet(), ","));
+    sb.append("}");
+    return sb.toString();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
index fd856ec..0275140 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
@@ -97,7 +97,7 @@ public class BackupStatus implements Serializable {
     if(snapshotName != null) {
       builder.setSnapshot(snapshotName);
     }
-    builder.setTable(ProtobufUtil.toProtoTableNameShaded(table));
+    builder.setTable(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
     builder.setTargetDir(targetDir);
     return builder.build();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
index 9deb15b..49586bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -125,6 +125,7 @@ public class HBackupFileSystem {
       Path backupRootPath, String backupId) throws IOException {
     Path manifestPath = new Path(getTableBackupPath(tableName, backupRootPath, backupId),
       BackupManifest.MANIFEST_FILE_NAME);
+
     FileSystem fs = backupRootPath.getFileSystem(conf);
     if (!fs.exists(manifestPath)) {
       // check log dir for incremental backup case
@@ -134,8 +135,8 @@ public class HBackupFileSystem {
       if (!fs.exists(manifestPath)) {
         String errorMsg =
             "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " +
-                backupId + " in " + backupRootPath.toString() +
-                ". Did " + backupId + " correspond to previously taken backup ?";
+                backupId + ". File " + manifestPath +
+                " does not exists. Did " + backupId + " correspond to previously taken backup ?";
         throw new IOException(errorMsg);
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
index 336060f..1ca512e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.util.BackupServerUtil;
 import org.apache.hadoop.hbase.backup.util.LogUtils;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
@@ -52,8 +52,8 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon
   private static final String USAGE_STRING =
       "Usage: bin/hbase restore <backup_path> <backup_id> <table(s)> [options]\n"
           + "  backup_path     Path to a backup destination root\n"
-          + "  backup_id       Backup image ID to restore"
-          + "  table(s)        Comma-separated list of tables to restore";
+          + "  backup_id       Backup image ID to restore\n"
+          + "  table(s)        Comma-separated list of tables to restore\n";
 
   private static final String USAGE_FOOTER = "";
 
@@ -70,7 +70,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon
     // Check if backup is enabled
     if (!BackupManager.isBackupEnabled(getConf())) {
       System.err.println("Backup is not enabled. To enable backup, "+
-          "set \'hbase.backup.enabled'=true and restart "+
+          "set "+ BackupRestoreConstants.BACKUP_ENABLE_KEY+"=true and restart "+
           "the cluster");
       return -1;
     }
@@ -110,7 +110,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon
     String tableMapping =
         cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
     try (final Connection conn = ConnectionFactory.createConnection(conf);
-        BackupAdmin client = new HBaseBackupAdmin(conn);) {
+        BackupAdmin client = new BackupAdminImpl(conn);) {
       // Check backup set
       if (cmd.hasOption(OPTION_SET)) {
         String setName = cmd.getOptionValue(OPTION_SET);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
new file mode 100644
index 0000000..b73e576
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -0,0 +1,556 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.collect.Lists;
+
+/**
+ * The administrative API implementation for HBase Backup. Create an instance from
+ * {@link #BackupAdminImpl(Connection)} and call {@link #close()} afterwards.
+ * <p>BackupAdmin can be used to create backups, restore data from backups and for
+ * other backup-related operations.
+ *
+ * @see Admin
+ * @since 2.0
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupAdminImpl implements BackupAdmin {
+  private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class);
+
+  private final Connection conn;
+
+  public BackupAdminImpl(Connection conn) {
+    this.conn = conn;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (conn != null) {
+      conn.close();
+    }
+  }
+
+  @Override
+  public BackupInfo getBackupInfo(String backupId) throws IOException {
+    BackupInfo backupInfo = null;
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      backupInfo = table.readBackupInfo(backupId);
+      return backupInfo;
+    }
+  }
+
+  @Override
+  public int getProgress(String backupId) throws IOException {
+    BackupInfo backupInfo = null;
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      if (backupId == null) {
+        ArrayList<BackupInfo> recentSessions = table.getBackupContexts(BackupState.RUNNING);
+        if (recentSessions.isEmpty()) {
+          LOG.warn("No ongoing sessions found.");
+          return -1;
+        }
+        // else show status for ongoing session
+        // there can be at most one
+        return recentSessions.get(0).getProgress();
+      } else {
+
+        backupInfo = table.readBackupInfo(backupId);
+        if (backupInfo != null) {
+          return backupInfo.getProgress();
+        } else {
+          LOG.warn("No information found for backupID=" + backupId);
+          return -1;
+        }
+      }
+    }
+  }
+
+  @Override
+  public int deleteBackups(String[] backupIds) throws IOException {
+    // TODO: requires fault tolerance; a failure will leave the system
+    // in an inconsistent state
+    // see HBASE-15227
+
+    int totalDeleted = 0;
+    Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>();
+
+    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
+      for (int i = 0; i < backupIds.length; i++) {
+        BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
+        if (info != null) {
+          String rootDir = info.getTargetRootDir();
+          HashSet<TableName> allTables = allTablesMap.get(rootDir);
+          if (allTables == null) {
+            allTables = new HashSet<TableName>();
+            allTablesMap.put(rootDir, allTables);
+          }
+          allTables.addAll(info.getTableNames());
+          totalDeleted += deleteBackup(backupIds[i], sysTable);
+        }
+      }
+      finalizeDelete(allTablesMap, sysTable);
+    }
+    return totalDeleted;
+  }
+
+  /**
+   * Updates the incremental backup table set for every backup root.
+   * @param tablesMap map from backup root to the set of table names
+   * @param table backup system table
+   * @throws IOException exception
+   */
+  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
+      throws IOException {
+    for (String backupRoot : tablesMap.keySet()) {
+      Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
+      Map<TableName, ArrayList<BackupInfo>> tableMap =
+          table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
+      for(Map.Entry<TableName, ArrayList<BackupInfo>> entry: tableMap.entrySet()) {
+        if(entry.getValue() == null) {
+          // No more backups for a table
+          incrTableSet.remove(entry.getKey());
+        }
+      }
+      if (!incrTableSet.isEmpty()) {
+        table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
+      } else { // empty
+        table.deleteIncrementalBackupTableSet(backupRoot);
+      }
+    }
+  }
+
+  /**
+   * Deletes a single backup and all related backups.
+   * <p>
+   * Algorithm. The backup type is FULL or INCREMENTAL; "YES"/"NO" denotes whether this is
+   * the last backup session for a table T. For every table T from the table list 'tables':
+   * <ul>
+   * <li>(FULL, YES): delete only the physical data (PD).</li>
+   * <li>(FULL, NO): delete PD, then scan all newer backups and remove T from their backup
+   * info, until we reach either the most recent backup for T in the system or a FULL
+   * backup that includes T.</li>
+   * <li>(INCREMENTAL, YES): delete only the physical data (PD).</li>
+   * <li>(INCREMENTAL, NO): delete PD, then for table T scan all backup images between the
+   * last FULL backup that is older than the backup being deleted and the next FULL backup
+   * (if it exists) or the last one for table T, and remove T from their lists of backup
+   * tables.</li>
+   * </ul>
+   * @param backupId backup id
+   * @param sysTable backup system table
+   * @return number of deleted backup images
+   * @throws IOException exception
+   */
+  private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
+
+    BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
+
+    int totalDeleted = 0;
+    if (backupInfo != null) {
+      LOG.info("Deleting backup " + backupInfo.getBackupId() + " ...");
+      BackupClientUtil.cleanupBackupData(backupInfo, conn.getConfiguration());
+      // List of tables in this backup;
+      List<TableName> tables = backupInfo.getTableNames();
+      long startTime = backupInfo.getStartTs();
+      for (TableName tn : tables) {
+        boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
+        if (isLastBackupSession) {
+          continue;
+        }
+        // else
+        List<BackupInfo> affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable);
+        for (BackupInfo info : affectedBackups) {
+          if (info.equals(backupInfo)) {
+            continue;
+          }
+          removeTableFromBackupImage(info, tn, sysTable);
+        }
+      }
+      LOG.debug("Delete backup info "+ backupInfo.getBackupId());
+
+      sysTable.deleteBackupInfo(backupInfo.getBackupId());
+      LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
+      totalDeleted++;
+    } else {
+      LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
+    }
+    return totalDeleted;
+  }
+
+  private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
+      throws IOException {
+    List<TableName> tables = info.getTableNames();
+    LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" +
+      info.getTableListAsString());
+    if (tables.contains(tn)) {
+      tables.remove(tn);
+
+      if (tables.isEmpty()) {
+        LOG.debug("Delete backup info "+ info.getBackupId());
+
+        sysTable.deleteBackupInfo(info.getBackupId());
+        BackupClientUtil.cleanupBackupData(info, conn.getConfiguration());
+      } else {
+        info.setTables(tables);
+        sysTable.updateBackupInfo(info);
+        // Now, clean up directory for table
+        cleanupBackupDir(info, tn, conn.getConfiguration());
+      }
+    }
+  }
+
+  private List<BackupInfo> getAffectedBackupInfos(BackupInfo backupInfo, TableName tn,
+      BackupSystemTable table) throws IOException {
+    LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
+    long ts = backupInfo.getStartTs();
+    List<BackupInfo> list = new ArrayList<BackupInfo>();
+    List<BackupInfo> history = table.getBackupHistory(backupInfo.getTargetRootDir());
+    // Scan from most recent to backupInfo
+    // break when backupInfo reached
+    for (BackupInfo info : history) {
+      if (info.getStartTs() == ts) {
+        break;
+      }
+      List<TableName> tables = info.getTableNames();
+      if (tables.contains(tn)) {
+        BackupType bt = info.getType();
+        if (bt == BackupType.FULL) {
+          // Clear list if we encounter FULL backup
+          list.clear();
+        } else {
+          LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
+              + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
+          list.add(info);
+        }
+      }
+    }
+    return list;
+  }
+
+  /**
+   * Cleans up the data at the target directory for the given table.
+   * @param backupInfo backup info
+   * @param table table name
+   * @param conf configuration
+   * @throws IOException exception
+   */
+  private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
+      throws IOException {
+    try {
+      // clean up the data at target directory
+      String targetDir = backupInfo.getTargetRootDir();
+      if (targetDir == null) {
+        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
+        return;
+      }
+
+      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf);
+
+      Path targetDirPath =
+          new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(),
+            backupInfo.getBackupId(), table));
+      if (outputFs.delete(targetDirPath, true)) {
+        LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
+      } else {
+        LOG.info("No data has been found in " + targetDirPath.toString() + ".");
+      }
+
+    } catch (IOException e1) {
+      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
+          + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
+      throw e1;
+    }
+  }
+
+  private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
+      throws IOException {
+    List<BackupInfo> history = table.getBackupHistory();
+    for (BackupInfo info : history) {
+      List<TableName> tables = info.getTableNames();
+      if (!tables.contains(tn)) {
+        continue;
+      }
+      return info.getStartTs() <= startTime;
+    }
+    return false;
+  }
+
+  @Override
+  public List<BackupInfo> getHistory(int n) throws IOException {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      List<BackupInfo> history = table.getBackupHistory();
+      if (history.size() <= n) return history;
+      List<BackupInfo> list = new ArrayList<BackupInfo>();
+      for (int i = 0; i < n; i++) {
+        list.add(history.get(i));
+      }
+      return list;
+    }
+  }
+
+  @Override
+  public List<BackupInfo> getHistory(int n, BackupInfo.Filter ... filters) throws IOException {
+    if (filters.length == 0) return getHistory(n);
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      List<BackupInfo> history = table.getBackupHistory();
+      List<BackupInfo> result = new ArrayList<BackupInfo>();
+      for (BackupInfo bi : history) {
+        if (result.size() == n) break;
+        boolean passed = true;
+        for (int i = 0; i < filters.length; i++) {
+          if (!filters[i].apply(bi)) {
+            passed = false;
+            break;
+          }
+        }
+        if (passed) {
+          result.add(bi);
+        }
+      }
+      return result;
+    }
+  }
+
+  @Override
+  public List<BackupSet> listBackupSets() throws IOException {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      List<String> list = table.listBackupSets();
+      List<BackupSet> bslist = new ArrayList<BackupSet>();
+      for (String s : list) {
+        List<TableName> tables = table.describeBackupSet(s);
+        if (tables != null) {
+          bslist.add(new BackupSet(s, tables));
+        }
+      }
+      return bslist;
+    }
+  }
+
+  @Override
+  public BackupSet getBackupSet(String name) throws IOException {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      List<TableName> list = table.describeBackupSet(name);
+      if (list == null) return null;
+      return new BackupSet(name, list);
+    }
+  }
+
+  @Override
+  public boolean deleteBackupSet(String name) throws IOException {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      if (table.describeBackupSet(name) == null) {
+        return false;
+      }
+      table.deleteBackupSet(name);
+      return true;
+    }
+  }
+
+  @Override
+  public void addToBackupSet(String name, TableName[] tables) throws IOException {
+    String[] tableNames = new String[tables.length];
+    try (final BackupSystemTable table = new BackupSystemTable(conn);
+         final Admin admin = conn.getAdmin();) {
+      for (int i = 0; i < tables.length; i++) {
+        tableNames[i] = tables[i].getNameAsString();
+        if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
+          throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist");
+        }
+      }
+      table.addToBackupSet(name, tableNames);
+      LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
+          + "' backup set");
+    }
+  }
+
+  @Override
+  public void removeFromBackupSet(String name, String[] tables) throws IOException {
+    LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      table.removeFromBackupSet(name, tables);
+      LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name
+          + "' completed.");
+    }
+  }
+
+  @Override
+  public void restore(RestoreRequest request) throws IOException {
+    if (request.isCheck()) {
+      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
+      // check and load backup image manifest for the tables
+      Path rootPath = new Path(request.getBackupRootDir());
+      String backupId = request.getBackupId();
+      TableName[] sTableArray = request.getFromTables();
+      HBackupFileSystem.checkImageManifestExist(backupManifestMap,
+        sTableArray, conn.getConfiguration(), rootPath, backupId);
+
+      // Check and validate the backup image and its dependencies
+      if (RestoreServerUtil.validate(backupManifestMap, conn.getConfiguration())) {
+        LOG.info("Checking backup images: ok");
+      } else {
+        String errMsg = "Some dependencies are missing for restore";
+        LOG.error(errMsg);
+        throw new IOException(errMsg);
+      }
+    }
+    // Execute restore request
+    new RestoreTablesClient(conn, request).execute();
+  }
+
+  @Override
+  public Future<Void> restoreAsync(RestoreRequest request) throws IOException {
+    throw new UnsupportedOperationException("Asynchronous restore is not supported yet");
+  }
+
+  @Override
+  public String backupTables(final BackupRequest request) throws IOException {
+    String setName = request.getBackupSetName();
+    BackupType type = request.getBackupType();
+    String targetRootDir = request.getTargetRootDir();
+    List<TableName> tableList = request.getTableList();
+
+    String backupId =
+        (setName == null || setName.length() == 0 ? BackupRestoreConstants.BACKUPID_PREFIX
+            : setName + "_") + EnvironmentEdgeManager.currentTime();
+    if (type == BackupType.INCREMENTAL) {
+      Set<TableName> incrTableSet = null;
+      try (BackupSystemTable table = new BackupSystemTable(conn)) {
+        incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
+      }
+
+      if (incrTableSet.isEmpty()) {
+        System.err.println("Incremental backup table set contains no table.\n"
+            + "Use 'backup create full' or 'backup stop' to \n "
+            + "change the tables covered by incremental backup.");
+        throw new IOException("No table covered by incremental backup.");
+      }
+
+      tableList.removeAll(incrTableSet);
+      if (!tableList.isEmpty()) {
+        String extraTables = StringUtils.join(tableList, ",");
+        System.err.println("Some tables (" + extraTables + ") haven't gone through full backup");
+        throw new IOException("Perform full backup on " + extraTables + " first, "
+            + "then retry the command");
+      }
+      System.out.println("Incremental backup for the following table set: " + incrTableSet);
+      tableList = Lists.newArrayList(incrTableSet);
+    }
+    if (tableList != null && !tableList.isEmpty()) {
+      for (TableName table : tableList) {
+        String targetTableBackupDir =
+            HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+        Path targetTableBackupDirPath = new Path(targetTableBackupDir);
+        FileSystem outputFs =
+            FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
+        if (outputFs.exists(targetTableBackupDirPath)) {
+          throw new IOException("Target backup directory " + targetTableBackupDir
+              + " exists already.");
+        }
+      }
+      ArrayList<TableName> nonExistingTableList = null;
+      try (Admin admin = conn.getAdmin();) {
+        for (TableName tableName : tableList) {
+          if (!admin.tableExists(tableName)) {
+            if (nonExistingTableList == null) {
+              nonExistingTableList = new ArrayList<>();
+            }
+            nonExistingTableList.add(tableName);
+          }
+        }
+      }
+      if (nonExistingTableList != null) {
+        if (type == BackupType.INCREMENTAL) {
+          System.err.println("Incremental backup table set contains non-exising table: "
+              + nonExistingTableList);
+          // Update incremental backup set
+          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
+        } else {
+          // Throw exception only in full mode - we tried to back up a non-existing table
+          throw new IOException("Non-existing tables found in the table list: "
+              + nonExistingTableList);
+        }
+      }
+    }
+
+    // update table list
+    request.setTableList(tableList);
+
+    if (type == BackupType.FULL) {
+      new FullTableBackupClient(conn, backupId, request).execute();
+    } else {
+      new IncrementalTableBackupClient(conn, backupId, request).execute();
+    }
+    return backupId;
+  }
+
+  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
+      List<TableName> nonExistingTableList) {
+
+    for (TableName table : nonExistingTableList) {
+      tableList.remove(table);
+    }
+    return tableList;
+  }
+
+  @Override
+  public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException {
+    throw new UnsupportedOperationException("Asynchronous backup is not supported yet");
+  }
+
+}
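
getHistory(int, Filter...) above applies each filter in turn via apply(). A
hedged caller-side sketch; the single apply(BackupInfo) method on
BackupInfo.Filter is inferred from the loop in getHistory, not shown in this
hunk:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.backup.BackupInfo;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;

    public final class HistoryFilterExample {
      public static List<BackupInfo> recentFulls(BackupAdminImpl admin) throws IOException {
        BackupInfo.Filter fullOnly = new BackupInfo.Filter() {
          @Override
          public boolean apply(BackupInfo info) {
            return info.getType() == BackupType.FULL;
          }
        };
        return admin.getHistory(10, fullOnly);
      }
    }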

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index c4227f5..8da489d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.BackupRequest;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.BackupType;
@@ -66,31 +67,33 @@ public final class BackupCommands implements BackupRestoreConstants {
       + "Run \'bin/hbase backup COMMAND -h\' to see help message for each command\n";
 
   public static final String CREATE_CMD_USAGE =
-       "Usage: bin/hbase backup create <type> <backup_root> [tables] [options]\n"
-       + "  type            \"full\" to create a full backup image\n"
-       + "                  \"incremental\" to create an incremental backup image\n"
-       + "  backup_root     Full path to store the backup image\n"
+       "Usage: bin/hbase backup create <type> <backup_path> [tables] [options]\n"
+       + "  type           \"full\" to create a full backup image\n"
+       + "                 \"incremental\" to create an incremental backup image\n"
+       + "  backup_path     Full path to store the backup image\n"
        + "  tables          If no tables (\"\") are specified, all tables are backed up.\n"
        + "                  otherwise it is a comma separated list of tables.";
 
 
-  public static final String PROGRESS_CMD_USAGE = "Usage: bin/hbase backup progress <backupId>\n"
-       + "  backupId        Backup image id\n";
+  public static final String PROGRESS_CMD_USAGE = "Usage: bin/hbase backup progress <backup_id>\n"
+       + "  backup_id       Backup image id (optional). If no id specified, the command will show\n"+
+         "                  progress for currently running backup session.";
   public static final String NO_INFO_FOUND = "No info was found for backup id: ";
+  public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found.";
 
-  public static final String DESCRIBE_CMD_USAGE = "Usage: bin/hbase backup describe <backupId>\n"
-       + "  backupId        Backup image id\n";
+  public static final String DESCRIBE_CMD_USAGE = "Usage: bin/hbase backup describe <backup_id>\n"
+       + "  backup_id       Backup image id\n";
 
   public static final String HISTORY_CMD_USAGE =
        "Usage: bin/hbase backup history [options]";
 
 
 
-  public static final String DELETE_CMD_USAGE = "Usage: bin/hbase backup delete <backupId>\n"
-       + "  backupId        Backup image id\n";
+  public static final String DELETE_CMD_USAGE = "Usage: bin/hbase backup delete <backup_id>\n"
+       + "  backup_id       Backup image id\n";
 
-  public static final String CANCEL_CMD_USAGE = "Usage: bin/hbase backup cancel <backupId>\n"
-       + "  backupId        Backup image id\n";
+  public static final String CANCEL_CMD_USAGE = "Usage: bin/hbase backup cancel <backup_id>\n"
+       + "  backup_id       Backup image id\n";
 
   public static final String SET_CMD_USAGE = "Usage: bin/hbase backup set COMMAND [name] [tables]\n"
        + "  name            Backup set name\n"
@@ -227,7 +230,7 @@ public final class BackupCommands implements BackupRestoreConstants {
           Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS)) : -1;
 
       try (Connection conn = ConnectionFactory.createConnection(getConf());
-          HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) {
+          BackupAdminImpl admin = new BackupAdminImpl(conn);) {
         BackupRequest request = new BackupRequest();
         request.setBackupType(BackupType.valueOf(args[1].toUpperCase()))
         .setTableList(tables != null?Lists.newArrayList(BackupClientUtil.parseTableNames(tables)): null)
@@ -392,8 +395,8 @@ public final class BackupCommands implements BackupRestoreConstants {
 
       if (cmdline == null || cmdline.getArgs() == null ||
           cmdline.getArgs().length == 1) {
-        System.err.println("No backup id was specified, "
-            + "will retrieve the most recent (ongoing) sessions");
+        System.out.println("No backup id was specified, "
+            + "will retrieve the most recent (ongoing) session");
       }
       String[] args = cmdline == null ? null : cmdline.getArgs();
       if (args != null && args.length > 2) {
@@ -406,10 +409,26 @@ public final class BackupCommands implements BackupRestoreConstants {
       Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create();
       try(final Connection conn = ConnectionFactory.createConnection(conf);
           final BackupSystemTable sysTable = new BackupSystemTable(conn);){
-        BackupInfo info = sysTable.readBackupInfo(backupId);
+        BackupInfo info = null;
+
+        if (backupId != null) {
+          info = sysTable.readBackupInfo(backupId);
+        } else {
+          List<BackupInfo> infos = sysTable.getBackupContexts(BackupState.RUNNING);
+          if (infos != null && infos.size() > 0) {
+            info = infos.get(0);
+            backupId = info.getBackupId();
+            System.out.println("Found ongoing session with backupId=" + backupId);
+          }
+        }
         int progress = info == null? -1: info.getProgress();
         if(progress < 0){
-          System.out.println(NO_INFO_FOUND + backupId);
+          if (backupId != null) {
+            System.out.println(NO_INFO_FOUND + backupId);
+          } else {
+            System.out.println(NO_ACTIVE_SESSION_FOUND);
+          }
         } else{
           System.out.println(backupId+" progress=" + progress+"%");
         }
@@ -443,7 +462,7 @@ public final class BackupCommands implements BackupRestoreConstants {
       System.arraycopy(args, 1, backupIds, 0, backupIds.length);
       Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
       try (final Connection conn = ConnectionFactory.createConnection(conf);
-          HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) {
+          BackupAdminImpl admin = new BackupAdminImpl(conn);) {
         int deleted = admin.deleteBackups(args);
         System.out.println("Deleted " + deleted + " backups. Total requested: " + args.length);
       }
@@ -473,7 +492,7 @@ public final class BackupCommands implements BackupRestoreConstants {
       }
       Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
       try (final Connection conn = ConnectionFactory.createConnection(conf);
-          HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) {
+          BackupAdminImpl admin = new BackupAdminImpl(conn);) {
         // TODO cancel backup
       }
     }
@@ -648,7 +667,7 @@ public final class BackupCommands implements BackupRestoreConstants {
       // does not expect any args
       Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create();
       try(final Connection conn = ConnectionFactory.createConnection(conf);
-          HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){
+          BackupAdminImpl admin = new BackupAdminImpl(conn);){
         List<BackupSet> list = admin.listBackupSets();
         for(BackupSet bs: list){
           System.out.println(bs);
@@ -683,7 +702,7 @@ public final class BackupCommands implements BackupRestoreConstants {
       String setName = args[2];
       Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create();
       try(final Connection conn = ConnectionFactory.createConnection(conf);
-          final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){
+          final BackupAdminImpl admin = new BackupAdminImpl(conn);){
         boolean result = admin.deleteBackupSet(setName);
         if(result){
           System.out.println("Delete set "+setName+" OK.");
@@ -703,7 +722,7 @@ public final class BackupCommands implements BackupRestoreConstants {
       String[] tables = args[3].split(",");
       Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create();
       try(final Connection conn = ConnectionFactory.createConnection(conf);
-          final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){
+          final BackupAdminImpl admin = new BackupAdminImpl(conn);){
         admin.removeFromBackupSet(setName, tables);
       }
     }
@@ -721,7 +740,7 @@ public final class BackupCommands implements BackupRestoreConstants {
       }
       Configuration conf = getConf() != null? getConf():HBaseConfiguration.create();
       try(final Connection conn = ConnectionFactory.createConnection(conf);
-          final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){
+          final BackupAdminImpl admin = new BackupAdminImpl(conn);){
         admin.addToBackupSet(setName, tableNames);
       }
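
The main behavioral change in ProgressCommand above is that a missing backup id is no longer an error; the command falls back to the first RUNNING session. A condensed sketch of that flow, using only names visible in the hunk:

    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      // No explicit id: report the first RUNNING session instead of failing.
      List<BackupInfo> infos = sysTable.getBackupContexts(BackupState.RUNNING);
      if (infos == null || infos.isEmpty()) {
        System.out.println(NO_ACTIVE_SESSION_FOUND);
      } else {
        BackupInfo info = infos.get(0);
        System.out.println(info.getBackupId() + " progress=" + info.getProgress() + "%");
      }
    }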
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index 6fb7cfd..ece07b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -25,10 +25,6 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -52,8 +48,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 /**
  * Handles backup requests on server-side, creates backup context records in hbase:backup
 * to keep track of backups. The timestamps kept in the hbase:backup table will be used for future
@@ -64,11 +58,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 public class BackupManager implements Closeable {
   private static final Log LOG = LogFactory.getLog(BackupManager.class);
 
-  private Configuration conf = null;
-  private BackupInfo backupContext = null;
-  private ExecutorService pool = null;
-  private BackupSystemTable systemTable;
-  private final Connection conn;
+  protected Configuration conf = null;
+  protected BackupInfo backupContext = null;
+  protected BackupSystemTable systemTable;
+  protected final Connection conn;
 
   /**
    * Backup manager constructor.
@@ -175,11 +168,7 @@ public class BackupManager implements Closeable {
    */
   @Override
   public void close() {
-    // currently, we shutdown now for all ongoing back handlers, we may need to do something like
-    // record the failed list somewhere later
-    if (this.pool != null) {
-      this.pool.shutdownNow();
-    }
+
     if (systemTable != null) {
       try {
         systemTable.close();
@@ -187,13 +176,6 @@ public class BackupManager implements Closeable {
         LOG.error(e);
       }
     }
-    if (conn != null) {
-      try {
-        conn.close();
-      } catch (IOException e) {
-        LOG.error(e);
-      }
-    }
   }
 
   /**
@@ -273,15 +255,6 @@ public class BackupManager implements Closeable {
           + ". Can not launch new backup until no ongoing backup remains.");
       throw new BackupException("There is ongoing backup.");
     }
-
-    // Initialize thread pools
-    int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1);
-    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
-    builder.setNameFormat("BackupHandler-%1$d");
-    this.pool =
-        new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
-            new LinkedBlockingQueue<Runnable>(), builder.build());
-    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
   }
 
   public void setBackupContext(BackupInfo backupContext) {
@@ -312,11 +285,14 @@ public class BackupManager implements Closeable {
 
     ArrayList<BackupInfo> allHistoryList = getBackupHistory(true);
     for (BackupInfo backup : allHistoryList) {
-      BackupImage image =
-          new BackupImage(backup.getBackupId(), backup.getType(),
-            backup.getTargetRootDir(),
-              backup.getTableNames(), backup.getStartTs(), backup
-                  .getEndTs());
+
+      BackupImage.Builder builder = BackupImage.newBuilder();
+
+      BackupImage image = builder.withBackupId(backup.getBackupId())
+          .withType(backup.getType()).withRootDir(backup.getTargetRootDir())
+          .withTableList(backup.getTableNames()).withStartTime(backup.getStartTs())
+          .withCompleteTime(backup.getEndTs()).build();
+
       // add the full backup image as an ancestor until the last incremental backup
       if (backup.getType().equals(BackupType.FULL)) {
         // check the backup image coverage, if previous image could be covered by the newer ones,
@@ -331,10 +307,9 @@ public class BackupManager implements Closeable {
         // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing
         // incremental backup
         if (BackupManifest.canCoverImage(ancestors, image)) {
-          LOG.debug("Met the backup boundary of the current table set. "
-              + "The root full backup images for the current backup scope:");
+          LOG.debug("Met the backup boundary of the current table set:");
           for (BackupImage image1 : ancestors) {
-            LOG.debug("  BackupId: " + image1.getBackupId() + ", Backup directory: "
+            LOG.debug("  BackupID=" + image1.getBackupId() + ", BackupDir="
                 + image1.getRootDir());
           }
         } else {
@@ -348,9 +323,10 @@ public class BackupManager implements Closeable {
           BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
           ancestors.add(lastIncrImage);
 
-          LOG.debug("Last dependent incremental backup image information:");
-          LOG.debug("  Token: " + lastIncrImage.getBackupId());
-          LOG.debug("  Backup directory: " + lastIncrImage.getRootDir());
+          LOG.debug("Last dependent incremental backup image: "
+               + "{BackupID=" + lastIncrImage.getBackupId()+","
+               + "BackupDir=" + lastIncrImage.getRootDir()+"}"
+              );
         }
       }
     }
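
The diff above migrates BackupManager from the positional six-argument BackupImage constructor (made private in the BackupManifest hunk below) to a fluent builder; with two adjacent long parameters (start and complete timestamps) the positional form was easy to transpose silently. The new call shape, condensed from the patch itself:

    BackupImage image = BackupImage.newBuilder()
        .withBackupId(backup.getBackupId())
        .withType(backup.getType())              // FULL or INCREMENTAL
        .withRootDir(backup.getTargetRootDir())
        .withTableList(backup.getTableNames())
        .withStartTime(backup.getStartTs())
        .withCompleteTime(backup.getEndTs())
        .build();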

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1eb653/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
index c3dc539..51f3cfb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
@@ -43,7 +42,7 @@ import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
@@ -66,6 +65,49 @@ public class BackupManifest {
 
   public static class BackupImage implements Comparable<BackupImage> {
 
+    static class Builder {
+      BackupImage image;
+
+      Builder() {
+        image = new BackupImage();
+      }
+
+      Builder withBackupId(String backupId) {
+        image.setBackupId(backupId);
+        return this;
+      }
+
+      Builder withType(BackupType type) {
+        image.setType(type);
+        return this;
+      }
+
+      Builder withRootDir(String rootDir) {
+        image.setRootDir(rootDir);
+        return this;
+      }
+
+      Builder withTableList(List<TableName> tableList) {
+        image.setTableList(tableList);
+        return this;
+      }
+
+      Builder withStartTime(long startTime) {
+        image.setStartTs(startTime);
+        return this;
+      }
+
+      Builder withCompleteTime(long completeTime) {
+        image.setCompleteTs(completeTime);
+        return this;
+      }
+
+      BackupImage build() {
+        return image;
+      }
+
+    }
+
     private String backupId;
     private BackupType type;
     private String rootDir;
@@ -75,11 +117,15 @@ public class BackupManifest {
     private ArrayList<BackupImage> ancestors;
     private HashMap<TableName, HashMap<String, Long>> incrTimeRanges;
 
+    static Builder newBuilder() {
+      return new Builder();
+    }
+
     public BackupImage() {
       super();
     }
 
-    public BackupImage(String backupId, BackupType type, String rootDir,
+    private BackupImage(String backupId, BackupType type, String rootDir,
         List<TableName> tableList, long startTs, long completeTs) {
       this.backupId = backupId;
       this.type = type;
@@ -127,7 +173,7 @@ public class BackupManifest {
       }
 
       for (TableName name: tableList) {
-        builder.addTableList(ProtobufUtil.toProtoTableNameShaded(name));
+        builder.addTableList(ProtobufUtil.toProtoTableName(name));
       }
 
       if (ancestors != null){
@@ -157,7 +203,7 @@ public class BackupManifest {
         }
         List<BackupProtos.ServerTimestamp> listSt = tst.getServerTimestampList();
         for(BackupProtos.ServerTimestamp stm: listSt) {
-          ServerName sn = ProtobufUtil.toServerNameShaded(stm.getServer());
+          ServerName sn = ProtobufUtil.toServerName(stm.getServer());
           map.put(sn.getHostname() +":" + sn.getPort(), stm.getTimestamp());
         }
       }
@@ -174,7 +220,7 @@ public class BackupManifest {
         HashMap<String, Long> value = entry.getValue();
         BackupProtos.TableServerTimestamp.Builder tstBuilder =
             BackupProtos.TableServerTimestamp.newBuilder();
-        tstBuilder.setTable(ProtobufUtil.toProtoTableNameShaded(key));
+        tstBuilder.setTable(ProtobufUtil.toProtoTableName(key));
 
         for (Map.Entry<String, Long> entry2 : value.entrySet()) {
           String s = entry2.getKey();
@@ -312,79 +358,37 @@ public class BackupManifest {
     }
   }
 
-  // hadoop hbase configuration
-  protected Configuration config = null;
-
-  // backup root directory
-  private String rootDir = null;
-
   // backup image directory
   private String tableBackupDir = null;
-
-  // backup log directory if this is an incremental backup
-  private String logBackupDir = null;
-
-  // backup token
-  private String backupId;
-
-  // backup type, full or incremental
-  private BackupType type;
-
-  // the table list for the backup
-  private ArrayList<TableName> tableList;
-
-  // actual start timestamp of the backup process
-  private long startTs;
-
-  // actual complete timestamp of the backup process
-  private long completeTs;
-
-  // the region server timestamp for tables:
-  // <table, <rs, timestamp>>
-  private Map<TableName, HashMap<String, Long>> incrTimeRanges;
-
-  // dependency of this backup, including all the dependent images to do PIT recovery
-  //private Map<String, BackupImage> dependency;
   private BackupImage backupImage;
 
   /**
   * Construct manifest for an ongoing backup.
-   * @param backupCtx The ongoing backup context
+   * @param backup The ongoing backup info
    */
-  public BackupManifest(BackupInfo backupCtx) {
-    this.backupId = backupCtx.getBackupId();
-    this.type = backupCtx.getType();
-    this.rootDir = backupCtx.getTargetRootDir();
-    if (this.type == BackupType.INCREMENTAL) {
-      this.logBackupDir = backupCtx.getHLogTargetDir();
-    }
-    this.startTs = backupCtx.getStartTs();
-    this.completeTs = backupCtx.getEndTs();
-    this.loadTableList(backupCtx.getTableNames());
-    this.backupImage = new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs,
-     this.completeTs);
+  public BackupManifest(BackupInfo backup) {
+
+    BackupImage.Builder builder = BackupImage.newBuilder();
+    this.backupImage = builder.withBackupId(backup.getBackupId())
+        .withType(backup.getType()).withRootDir(backup.getTargetRootDir())
+        .withTableList(backup.getTableNames()).withStartTime(backup.getStartTs())
+        .withCompleteTime(backup.getEndTs()).build();
   }
 
 
   /**
    * Construct a table level manifest for a backup of the named table.
-   * @param backupCtx The ongoing backup context
+   * @param backup The ongoing backup session info
    */
-  public BackupManifest(BackupInfo backupCtx, TableName table) {
-    this.backupId = backupCtx.getBackupId();
-    this.type = backupCtx.getType();
-    this.rootDir = backupCtx.getTargetRootDir();
-    this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir();
-    if (this.type == BackupType.INCREMENTAL) {
-      this.logBackupDir = backupCtx.getHLogTargetDir();
-    }
-    this.startTs = backupCtx.getStartTs();
-    this.completeTs = backupCtx.getEndTs();
+  public BackupManifest(BackupInfo backup, TableName table) {
+    this.tableBackupDir = backup.getBackupStatus(table).getTargetDir();
     List<TableName> tables = new ArrayList<TableName>();
     tables.add(table);
-    this.loadTableList(tables);
-    this.backupImage = new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs,
-      this.completeTs);
+    BackupImage.Builder builder = BackupImage.newBuilder();
+    this.backupImage = builder.withBackupId(backup.getBackupId())
+        .withType(backup.getType()).withRootDir(backup.getTargetRootDir())
+        .withTableList(tables).withStartTime(backup.getStartTs())
+        .withCompleteTime(backup.getEndTs()).build();
   }
 
   /**
@@ -413,8 +417,6 @@ public class BackupManifest {
     // It could be the backup log dir where there is also a manifest file stored.
     // This variable's purpose is to keep the correct and original location so
     // that we can store/persist it.
-    this.tableBackupDir = backupPath.toString();
-    this.config = fs.getConf();
     try {
 
       FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null);
@@ -438,23 +440,6 @@ public class BackupManifest {
             throw new BackupException(e);
           }
           this.backupImage = BackupImage.fromProto(proto);
-          // Here the parameter backupDir is where the manifest file is.
-          // There should always be a manifest file under:
-          // backupRootDir/namespace/table/backupId/.backup.manifest
-          this.rootDir = backupPath.getParent().getParent().getParent().toString();
-
-          Path p = backupPath.getParent();
-          if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
-            this.rootDir = p.getParent().toString();
-          } else {
-            this.rootDir = p.getParent().getParent().toString();
-          }
-          this.backupId = this.backupImage.getBackupId();
-          this.startTs = this.backupImage.getStartTs();
-          this.completeTs = this.backupImage.getCompleteTs();
-          this.type = this.backupImage.getType();
-          this.tableList = (ArrayList<TableName>)this.backupImage.getTableNames();
-          this.incrTimeRanges = this.backupImage.getIncrTimeRanges();
           LOG.debug("Loaded manifest instance from manifest file: "
               + BackupClientUtil.getPath(subFile.getPath()));
           return;
@@ -469,39 +454,15 @@ public class BackupManifest {
   }
 
   public BackupType getType() {
-    return type;
-  }
-
-  public void setType(BackupType type) {
-    this.type = type;
-  }
-
-  /**
-   * Loads table list.
-   * @param tableList Table list
-   */
-  private void loadTableList(List<TableName> tableList) {
-
-    this.tableList = this.getTableList();
-    if (this.tableList.size() > 0) {
-      this.tableList.clear();
-    }
-    for (int i = 0; i < tableList.size(); i++) {
-      this.tableList.add(tableList.get(i));
-    }
-
-    LOG.debug(tableList.size() + " tables exist in table set.");
+    return backupImage.getType();
   }
 
   /**
    * Get the table set of this image.
    * @return The table set list
    */
-  public ArrayList<TableName> getTableList() {
-    if (this.tableList == null) {
-      this.tableList = new ArrayList<TableName>();
-    }
-    return this.tableList;
+  public List<TableName> getTableList() {
+    return backupImage.getTableNames();
   }
 
   /**
@@ -512,14 +473,15 @@ public class BackupManifest {
   public void store(Configuration conf) throws BackupException {
     byte[] data = backupImage.toProto().toByteArray();
     // write the file, overwrite if already exist
+    String logBackupDir = BackupClientUtil.getLogBackupDir(backupImage.getRootDir(),
+      backupImage.getBackupId());
     Path manifestFilePath =
-        new Path(new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir))
+        new Path(new Path((tableBackupDir != null ? tableBackupDir : logBackupDir))
             ,MANIFEST_FILE_NAME);
-    try {
-      FSDataOutputStream out =
-          manifestFilePath.getFileSystem(conf).create(manifestFilePath, true);
+    try (FSDataOutputStream out =
+        manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
       out.write(data);
-      out.close();
     } catch (IOException e) {
       throw new BackupException(e.getMessage());
     }
@@ -527,7 +489,6 @@ public class BackupManifest {
     LOG.info("Manifest file stored to " + manifestFilePath);
   }
 
-
   /**
    * Get this backup image.
    * @return the backup image.
@@ -549,15 +510,11 @@ public class BackupManifest {
    * @param incrTimestampMap timestamp map
    */
   public void setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> incrTimestampMap) {
-    this.incrTimeRanges = incrTimestampMap;
     this.backupImage.setIncrTimeRanges(incrTimestampMap);
   }
 
   public Map<TableName, HashMap<String, Long>> getIncrTimestampMap() {
-    if (this.incrTimeRanges == null) {
-      this.incrTimeRanges = new HashMap<TableName, HashMap<String, Long>>();
-    }
-    return this.incrTimeRanges;
+    return backupImage.getIncrTimeRanges();
   }
 
   /**
@@ -697,14 +654,17 @@ public class BackupManifest {
   public BackupInfo toBackupInfo()
   {
     BackupInfo info = new BackupInfo();
-    info.setType(type);
-    TableName[] tables = new TableName[tableList.size()];
-    info.addTables(getTableList().toArray(tables));
-    info.setBackupId(backupId);
-    info.setStartTs(startTs);
-    info.setTargetRootDir(rootDir);
-    if(type == BackupType.INCREMENTAL) {
-      info.setHlogTargetDir(logBackupDir);
+    info.setType(backupImage.getType());
+    List<TableName> list = backupImage.getTableNames();
+    TableName[] tables = new TableName[list.size()];
+    info.addTables(list.toArray(tables));
+    info.setBackupId(backupImage.getBackupId());
+    info.setStartTs(backupImage.getStartTs());
+    info.setTargetRootDir(backupImage.getRootDir());
+    if (backupImage.getType() == BackupType.INCREMENTAL) {
+      info.setHLogTargetDir(BackupClientUtil.getLogBackupDir(backupImage.getRootDir(),
+        backupImage.getBackupId()));
     }
     return info;
   }