Posted to commits@hbase.apache.org by el...@apache.org on 2017/08/23 16:47:15 UTC

[18/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

Signed-off-by: Josh Elser <el...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2dda3712
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2dda3712
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2dda3712

Branch: refs/heads/master
Commit: 2dda371209b2e810fa76034b8fa8dcf47140e265
Parents: 6c0e219
Author: Vladimir Rodionov <vr...@hortonworks.com>
Authored: Tue Aug 22 17:14:48 2017 -0700
Committer: Josh Elser <el...@apache.org>
Committed: Wed Aug 23 12:40:57 2017 -0400

----------------------------------------------------------------------
 hbase-assembly/pom.xml                          |    6 +-
 .../src/main/assembly/hadoop-two-compat.xml     |    1 +
 hbase-assembly/src/main/assembly/src.xml        |    1 +
 hbase-backup/.DS_Store                          |  Bin 0 -> 6148 bytes
 hbase-backup/pom.xml                            |  265 +++
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  136 ++
 .../hbase/backup/BackupClientFactory.java       |   53 +
 .../hadoop/hbase/backup/BackupCopyJob.java      |   55 +
 .../hadoop/hbase/backup/BackupDriver.java       |  210 ++
 .../hadoop/hbase/backup/BackupHFileCleaner.java |  180 ++
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  550 +++++
 .../hadoop/hbase/backup/BackupMergeJob.java     |   40 +
 .../hadoop/hbase/backup/BackupObserver.java     |  102 +
 .../hadoop/hbase/backup/BackupRequest.java      |  139 ++
 .../hbase/backup/BackupRestoreConstants.java    |  123 ++
 .../hbase/backup/BackupRestoreFactory.java      |   82 +
 .../hadoop/hbase/backup/BackupTableInfo.java    |   82 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  146 ++
 .../apache/hadoop/hbase/backup/LogUtils.java    |   50 +
 .../hadoop/hbase/backup/RestoreDriver.java      |  265 +++
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 +
 .../hadoop/hbase/backup/RestoreRequest.java     |  135 ++
 .../hbase/backup/impl/BackupAdminImpl.java      |  743 +++++++
 .../hbase/backup/impl/BackupCommands.java       | 1022 +++++++++
 .../hbase/backup/impl/BackupException.java      |   84 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  502 +++++
 .../hbase/backup/impl/BackupManifest.java       |  674 ++++++
 .../hbase/backup/impl/BackupSystemTable.java    | 2051 ++++++++++++++++++
 .../backup/impl/FullTableBackupClient.java      |  224 ++
 .../backup/impl/IncrementalBackupManager.java   |  387 ++++
 .../impl/IncrementalTableBackupClient.java      |  377 ++++
 .../hbase/backup/impl/RestoreTablesClient.java  |  278 +++
 .../hbase/backup/impl/TableBackupClient.java    |  436 ++++
 .../mapreduce/MapReduceBackupCopyJob.java       |  344 +++
 .../mapreduce/MapReduceBackupMergeJob.java      |  321 +++
 .../mapreduce/MapReduceHFileSplitterJob.java    |  181 ++
 .../backup/mapreduce/MapReduceRestoreJob.java   |  136 ++
 .../hbase/backup/master/BackupLogCleaner.java   |  142 ++
 .../master/LogRollMasterProcedureManager.java   |  155 ++
 .../regionserver/LogRollBackupSubprocedure.java |  168 ++
 .../LogRollBackupSubprocedurePool.java          |  139 ++
 .../LogRollRegionServerProcedureManager.java    |  185 ++
 .../hadoop/hbase/backup/util/BackupSet.java     |   58 +
 .../hadoop/hbase/backup/util/BackupUtils.java   |  747 +++++++
 .../hadoop/hbase/backup/util/RestoreTool.java   |  516 +++++
 .../hadoop/hbase/backup/TestBackupBase.java     |  503 +++++
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  431 ++++
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../backup/TestBackupDeleteWithFailures.java    |  194 ++
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 +
 .../hbase/backup/TestBackupHFileCleaner.java    |  141 ++
 .../hbase/backup/TestBackupMultipleDeletes.java |  158 ++
 .../hadoop/hbase/backup/TestBackupRepair.java   |   91 +
 .../hbase/backup/TestBackupShowHistory.java     |  148 ++
 .../hbase/backup/TestBackupStatusProgress.java  |   96 +
 .../hbase/backup/TestBackupSystemTable.java     |  511 +++++
 .../hadoop/hbase/backup/TestFullBackup.java     |   59 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 +
 .../backup/TestFullBackupSetRestoreSet.java     |  128 ++
 .../backup/TestFullBackupWithFailures.java      |   79 +
 .../hadoop/hbase/backup/TestFullRestore.java    |  345 +++
 .../hbase/backup/TestIncrementalBackup.java     |  209 ++
 .../TestIncrementalBackupDeleteTable.java       |  129 ++
 .../TestIncrementalBackupMergeWithFailures.java |  336 +++
 .../TestIncrementalBackupWithBulkLoad.java      |  145 ++
 .../TestIncrementalBackupWithFailures.java      |  161 ++
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  135 ++
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   59 +
 .../backup/TestRepairAfterFailedDelete.java     |   93 +
 .../hbase/backup/TestRestoreBoundaryTests.java  |   80 +
 .../hbase/backup/TestSystemTableSnapshot.java   |   55 +
 .../backup/master/TestBackupLogCleaner.java     |  162 ++
 .../src/test/resources/log4j.properties         |   68 +
 hbase-it/pom.xml                                |    5 +
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  136 --
 .../hbase/backup/BackupClientFactory.java       |   53 -
 .../hadoop/hbase/backup/BackupCopyJob.java      |   55 -
 .../hadoop/hbase/backup/BackupDriver.java       |  210 --
 .../hadoop/hbase/backup/BackupHFileCleaner.java |  180 --
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  550 -----
 .../hadoop/hbase/backup/BackupMergeJob.java     |   40 -
 .../hadoop/hbase/backup/BackupObserver.java     |  102 -
 .../hadoop/hbase/backup/BackupRequest.java      |  139 --
 .../hbase/backup/BackupRestoreConstants.java    |  123 --
 .../hbase/backup/BackupRestoreFactory.java      |   82 -
 .../hadoop/hbase/backup/BackupTableInfo.java    |   82 -
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  146 --
 .../apache/hadoop/hbase/backup/LogUtils.java    |   50 -
 .../hadoop/hbase/backup/RestoreDriver.java      |  265 ---
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 -
 .../hadoop/hbase/backup/RestoreRequest.java     |  135 --
 .../hbase/backup/impl/BackupAdminImpl.java      |  743 -------
 .../hbase/backup/impl/BackupCommands.java       | 1022 ---------
 .../hbase/backup/impl/BackupException.java      |   84 -
 .../hadoop/hbase/backup/impl/BackupManager.java |  502 -----
 .../hbase/backup/impl/BackupManifest.java       |  674 ------
 .../hbase/backup/impl/BackupSystemTable.java    | 2051 ------------------
 .../backup/impl/FullTableBackupClient.java      |  224 --
 .../backup/impl/IncrementalBackupManager.java   |  387 ----
 .../impl/IncrementalTableBackupClient.java      |  377 ----
 .../hbase/backup/impl/RestoreTablesClient.java  |  278 ---
 .../hbase/backup/impl/TableBackupClient.java    |  436 ----
 .../mapreduce/MapReduceBackupCopyJob.java       |  344 ---
 .../mapreduce/MapReduceBackupMergeJob.java      |  321 ---
 .../mapreduce/MapReduceHFileSplitterJob.java    |  181 --
 .../backup/mapreduce/MapReduceRestoreJob.java   |  136 --
 .../hbase/backup/master/BackupLogCleaner.java   |  142 --
 .../master/LogRollMasterProcedureManager.java   |  155 --
 .../regionserver/LogRollBackupSubprocedure.java |  168 --
 .../LogRollBackupSubprocedurePool.java          |  139 --
 .../LogRollRegionServerProcedureManager.java    |  185 --
 .../hadoop/hbase/backup/util/BackupSet.java     |   58 -
 .../hadoop/hbase/backup/util/BackupUtils.java   |  747 -------
 .../hadoop/hbase/backup/util/RestoreTool.java   |  516 -----
 .../hadoop/hbase/backup/TestBackupBase.java     |  503 -----
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 -
 .../hbase/backup/TestBackupCommandLineTool.java |  431 ----
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 -
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 -
 .../backup/TestBackupDeleteWithFailures.java    |  194 --
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 -
 .../hbase/backup/TestBackupHFileCleaner.java    |  141 --
 .../hbase/backup/TestBackupMultipleDeletes.java |  158 --
 .../hadoop/hbase/backup/TestBackupRepair.java   |   91 -
 .../hbase/backup/TestBackupShowHistory.java     |  148 --
 .../hbase/backup/TestBackupStatusProgress.java  |   96 -
 .../hbase/backup/TestBackupSystemTable.java     |  511 -----
 .../hadoop/hbase/backup/TestFullBackup.java     |   59 -
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 -
 .../backup/TestFullBackupSetRestoreSet.java     |  128 --
 .../backup/TestFullBackupWithFailures.java      |   79 -
 .../hadoop/hbase/backup/TestFullRestore.java    |  345 ---
 .../hbase/backup/TestIncrementalBackup.java     |  209 --
 .../TestIncrementalBackupDeleteTable.java       |  129 --
 .../TestIncrementalBackupMergeWithFailures.java |  336 ---
 .../TestIncrementalBackupWithBulkLoad.java      |  145 --
 .../TestIncrementalBackupWithFailures.java      |  161 --
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  135 --
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   59 -
 .../backup/TestRepairAfterFailedDelete.java     |   93 -
 .../hbase/backup/TestRestoreBoundaryTests.java  |   80 -
 .../hbase/backup/TestSystemTableSnapshot.java   |   55 -
 .../backup/master/TestBackupLogCleaner.java     |  162 --
 pom.xml                                         |    1 +
 146 files changed, 17540 insertions(+), 17195 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 14074ad..c9488ca 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -281,8 +281,12 @@
       <version>${project.version}</version>
       <type>test-jar</type>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-backup</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
-
   <profiles>
     <profile>
       <id>rsgroup</id>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 8039c4d..1592a3b 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -49,6 +49,7 @@
         <include>org.apache.hbase:hbase-spark</include>
         <include>org.apache.hbase:hbase-thrift</include>
         <include>org.apache.hbase:hbase-external-blockcache</include>
+        <include>org.apache.hbase:hbase-backup</include>
       </includes>
       <!-- Binaries for the dependencies also go in the hbase-jars directory -->
       <binaries>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-assembly/src/main/assembly/src.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/src/main/assembly/src.xml b/hbase-assembly/src/main/assembly/src.xml
index 436b1ff..b00f05f 100644
--- a/hbase-assembly/src/main/assembly/src.xml
+++ b/hbase-assembly/src/main/assembly/src.xml
@@ -61,6 +61,7 @@
         <include>org.apache.hbase:hbase-spark-it</include>
         <include>org.apache.hbase:hbase-testing-util</include>
         <include>org.apache.hbase:hbase-thrift</include>
+        <include>org.apache.hbase:hbase-backup</include>
       </includes>
       <!-- Include all the sources in the top directory -->
       <sources>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/.DS_Store
----------------------------------------------------------------------
diff --git a/hbase-backup/.DS_Store b/hbase-backup/.DS_Store
new file mode 100644
index 0000000..ab57a77
Binary files /dev/null and b/hbase-backup/.DS_Store differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml
new file mode 100644
index 0000000..7c7d8b5
--- /dev/null
+++ b/hbase-backup/pom.xml
@@ -0,0 +1,265 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <!--
+  /**
+   * Licensed to the Apache Software Foundation (ASF) under one
+   * or more contributor license agreements.  See the NOTICE file
+   * distributed with this work for additional information
+   * regarding copyright ownership.  The ASF licenses this file
+   * to you under the Apache License, Version 2.0 (the
+   * "License"); you may not use this file except in compliance
+   * with the License.  You may obtain a copy of the License at
+   *
+   *     http://www.apache.org/licenses/LICENSE-2.0
+   *
+   * Unless required by applicable law or agreed to in writing, software
+   * distributed under the License is distributed on an "AS IS" BASIS,
+   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   * See the License for the specific language governing permissions and
+   * limitations under the License.
+   */
+  -->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>hbase-build-configuration</artifactId>
+    <groupId>org.apache.hbase</groupId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../hbase-build-configuration</relativePath>
+  </parent>
+  <artifactId>hbase-backup</artifactId>
+  <name>Apache HBase - Backup</name>
+  <description>Backup for HBase</description>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-site-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <!--Make it so assembly:single does nothing in here-->
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <skipAssembly>true</skipAssembly>
+        </configuration>
+      </plugin>
+      <!-- Make a jar and put the sources in the jar -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+      </plugin>
+    </plugins>
+    <pluginManagement>
+      <plugins>
+        <!--This plugin's configuration is used to store Eclipse m2e settings
+             only. It has no influence on the Maven build itself.-->
+        <plugin>
+          <groupId>org.eclipse.m2e</groupId>
+          <artifactId>lifecycle-mapping</artifactId>
+          <configuration>
+            <lifecycleMappingMetadata>
+              <pluginExecutions>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-compiler-plugin</artifactId>
+                    <versionRange>[3.2,)</versionRange>
+                    <goals>
+                      <goal>compile</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore/>
+                  </action>
+                </pluginExecution>
+              </pluginExecutions>
+            </lifecycleMappingMetadata>
+          </configuration>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+  </build>
+  <dependencies>
+    <!-- Intra-project dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol-shaded</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-testing-util</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <!-- General dependencies -->
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase.thirdparty</groupId>
+      <artifactId>hbase-shaded-miscellaneous</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase.thirdparty</groupId>
+      <artifactId>hbase-shaded-netty</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <profiles>
+    <!-- profile against Hadoop 2.x: This is the default. -->
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
+          <!--h2-->
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>com.github.stephenc.findbugs</groupId>
+          <artifactId>findbugs-annotations</artifactId>
+          <optional>true</optional>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-auth</artifactId>
+        </dependency>
+        <!--dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>net.java.dev.jets3t</groupId>
+              <artifactId>jets3t</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>javax.servlet.jsp</groupId>
+              <artifactId>jsp-api</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>org.mortbay.jetty</groupId>
+              <artifactId>jetty</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-server</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-core</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-json</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>javax.servlet</groupId>
+              <artifactId>servlet-api</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>tomcat</groupId>
+              <artifactId>jasper-compiler</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>tomcat</groupId>
+              <artifactId>jasper-runtime</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency-->
+<!-- Hadoop needs Netty 3.x at test scope for the minicluster -->
+        <dependency>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+          <version>${netty.hadoop.version}</version>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+    </profile>
+    <!--
+      profile for building against Hadoop 3.0.x. Activate using:
+       mvn -Dhadoop.profile=3.0
+    -->
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>3.0-SNAPSHOT</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-auth</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
+</project>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
new file mode 100644
index 0000000..9dc6382
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * The administrative API for HBase Backup. Construct an instance and call {@link #close()}
+ * when done.
+ * <p>
+ * BackupAdmin can be used to create backups, restore data from backups, and perform other
+ * backup-related operations.
+ * @since 2.0
+ */
+@InterfaceAudience.Private
+public interface BackupAdmin extends Closeable {
+
+  /**
+   * Back up the given list of tables fully. This is a synchronous operation. It returns the
+   * backup ID on success or throws an exception on failure.
+   * @param userRequest BackupRequest instance
+   * @return the backup ID
+   */
+
+  String backupTables(final BackupRequest userRequest) throws IOException;
+
+  /**
+   * Restore backup
+   * @param request restore request
+   * @throws IOException exception
+   */
+  void restore(RestoreRequest request) throws IOException;
+
+  /**
+   * Describe backup image command
+   * @param backupId backup id
+   * @return backup info
+   * @throws IOException exception
+   */
+  BackupInfo getBackupInfo(String backupId) throws IOException;
+
+  /**
+   * Delete backup image command
+   * @param backupIds array of backup ids
+   * @return total number of deleted sessions
+   * @throws IOException exception
+   */
+  int deleteBackups(String[] backupIds) throws IOException;
+
+  /**
+   * Merge backup images command
+   * @param backupIds array of backup ids of the images to be merged.
+   *        The resulting backup image will have the same backup id as the most
+   *        recent image in the list of images being merged
+   * @throws IOException exception
+   */
+  void mergeBackups(String[] backupIds) throws IOException;
+
+  /**
+   * Show backup history command
+   * @param n last n backup sessions
+   * @return list of backup info objects
+   * @throws IOException exception
+   */
+  List<BackupInfo> getHistory(int n) throws IOException;
+
+  /**
+   * Show backup history command with filters
+   * @param n last n backup sessions
+   * @param f list of filters
+   * @return list of backup info objects
+   * @throws IOException exception
+   */
+  List<BackupInfo> getHistory(int n, BackupInfo.Filter... f) throws IOException;
+
+  /**
+   * Backup sets list command - list all backup sets. A backup set is a named group of tables.
+   * @return all registered backup sets
+   * @throws IOException exception
+   */
+  List<BackupSet> listBackupSets() throws IOException;
+
+  /**
+   * Backup set describe command. Shows the list of tables in the given backup set.
+   * @param name set name
+   * @return backup set description or null
+   * @throws IOException exception
+   */
+  BackupSet getBackupSet(String name) throws IOException;
+
+  /**
+   * Delete backup set command
+   * @param name backup set name
+   * @return true if successful, false otherwise
+   * @throws IOException exception
+   */
+  boolean deleteBackupSet(String name) throws IOException;
+
+  /**
+   * Add tables to backup set command
+   * @param name name of backup set.
+   * @param tables array of tables to be added to this set.
+   * @throws IOException exception
+   */
+  void addToBackupSet(String name, TableName[] tables) throws IOException;
+
+  /**
+   * Remove tables from backup set
+   * @param name name of backup set.
+   * @param tables array of tables to be removed from this set.
+   * @throws IOException exception
+   */
+  void removeFromBackupSet(String name, TableName[] tables) throws IOException;
+}
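
For illustration only, not part of this commit: a minimal sketch of driving a full backup
through the BackupAdmin API above. BackupAdminImpl and BackupRequest.Builder come from other
files in this patch, so their exact signatures, and the table name and target directory used
below, should be treated as assumptions.

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupAdmin;
    import org.apache.hadoop.hbase.backup.BackupInfo;
    import org.apache.hadoop.hbase.backup.BackupRequest;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BackupAdminExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             BackupAdmin admin = new BackupAdminImpl(conn)) {
          // Build a full-backup request for one table (builder methods assumed from
          // BackupRequest.java elsewhere in this patch).
          BackupRequest request = new BackupRequest.Builder()
              .withBackupType(BackupType.FULL)
              .withTableList(Arrays.asList(TableName.valueOf("usertable")))
              .withTargetRootDir("hdfs:///backup") // hypothetical destination
              .build();
          // Synchronous call; returns the backup ID on success, throws IOException on failure.
          String backupId = admin.backupTables(request);
          BackupInfo info = admin.getBackupInfo(backupId);
          System.out.println("Backup " + backupId + " finished in state " + info.getState());
        }
      }
    }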

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
new file mode 100644
index 0000000..21d73cc
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
+import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
+import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
+import org.apache.hadoop.hbase.client.Connection;
+
+public class BackupClientFactory {
+
+  public static TableBackupClient create(Connection conn, String backupId,
+      BackupRequest request) throws IOException {
+    // A custom TableBackupClient implementation may be supplied via configuration.
+    Configuration conf = conn.getConfiguration();
+    try {
+      String clsName = conf.get(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS);
+      if (clsName != null) {
+        Class<?> clientImpl = Class.forName(clsName);
+        TableBackupClient client = (TableBackupClient) clientImpl.newInstance();
+        client.init(conn, backupId, request);
+        return client;
+      }
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+
+    BackupType type = request.getBackupType();
+    if (type == BackupType.FULL) {
+      return new FullTableBackupClient(conn, backupId, request);
+    } else {
+      return new IncrementalTableBackupClient(conn, backupId, request);
+    }
+  }
+}
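
For illustration only, not part of this commit: the factory above first consults
TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, reflectively instantiates the named class with a
no-arg constructor and calls init(conn, backupId, request) on it, falling back to
Full/IncrementalTableBackupClient when the key is unset. A minimal sketch of supplying a custom
client that way; the class name com.example.backup.AuditingBackupClient is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.impl.TableBackupClient;

    public class CustomBackupClientConfig {
      public static Configuration withCustomClient() {
        Configuration conf = HBaseConfiguration.create();
        // BackupClientFactory.create() reads this key; the named class must provide a
        // no-arg constructor because it is instantiated via reflection before init(...).
        conf.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
            "com.example.backup.AuditingBackupClient"); // hypothetical implementation
        return conf;
      }
    }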

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
new file mode 100644
index 0000000..007e4c1
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * A backup copy job is part of a backup process. The concrete implementation is responsible for
+ * copying data from the cluster to the backup destination. The concrete implementation is
+ * provided by the backup provider; see {@link BackupRestoreFactory}.
+ */
+@InterfaceAudience.Private
+public interface BackupCopyJob extends Configurable {
+
+  /**
+   * Copy backup data to destination
+   * @param backupInfo context object
+   * @param backupManager backup manager
+   * @param conf configuration
+   * @param backupType backup type (FULL or INCREMENTAL)
+   * @param options array of options (implementation-specific)
+   * @return result (0 - success, -1 - failure)
+   * @throws IOException exception
+   */
+  int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
+      BackupType backupType, String[] options) throws IOException;
+
+  /**
+   * Cancel copy job
+   * @param jobHandler backup copy job handler
+   * @throws IOException
+   */
+  void cancel(String jobHandler) throws IOException;
+}
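
For illustration only, not part of this commit: a skeletal, do-nothing implementation showing
the BackupCopyJob contract (Configurable plus copy/cancel). The real provider in this patch is
the MapReduce-based job under backup/mapreduce; the class below is purely hypothetical.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.backup.BackupCopyJob;
    import org.apache.hadoop.hbase.backup.BackupInfo;
    import org.apache.hadoop.hbase.backup.BackupType;
    import org.apache.hadoop.hbase.backup.impl.BackupManager;

    public class NoOpBackupCopyJob implements BackupCopyJob {
      private Configuration conf;

      @Override
      public int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
          BackupType backupType, String[] options) throws IOException {
        // A real implementation copies snapshot or WAL data to the backup destination.
        return 0; // 0 means success per the interface javadoc
      }

      @Override
      public void cancel(String jobHandler) throws IOException {
        // Nothing to cancel in this no-op sketch.
      }

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }
    }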

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
new file mode 100644
index 0000000..9dd8531
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.backup.impl.BackupCommands;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ *
+ * Command-line entry point for backup operation
+ *
+ */
+@InterfaceAudience.Private
+public class BackupDriver extends AbstractHBaseTool {
+
+  private static final Log LOG = LogFactory.getLog(BackupDriver.class);
+  private CommandLine cmd;
+
+  public BackupDriver() throws IOException {
+    init();
+  }
+
+  protected void init() throws IOException {
+    // disable irrelevant loggers so they do not clutter the command output
+    LogUtils.disableZkAndClientLoggers(LOG);
+  }
+
+  private int parseAndRun(String[] args) throws IOException {
+
+    // Check if backup is enabled
+    if (!BackupManager.isBackupEnabled(getConf())) {
+      System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
+      return -1;
+    }
+
+    System.out.println(BackupRestoreConstants.VERIFY_BACKUP);
+
+    String cmd = null;
+    String[] remainArgs = null;
+    if (args == null || args.length == 0) {
+      printToolUsage();
+      return -1;
+    } else {
+      cmd = args[0];
+      remainArgs = new String[args.length - 1];
+      if (args.length > 1) {
+        System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
+      }
+    }
+
+    BackupCommand type = BackupCommand.HELP;
+    if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.CREATE;
+    } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.HELP;
+    } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.DELETE;
+    } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.DESCRIBE;
+    } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.HISTORY;
+    } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.PROGRESS;
+    } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.SET;
+    } else if (BackupCommand.REPAIR.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.REPAIR;
+    } else if (BackupCommand.MERGE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.MERGE;
+    } else {
+      System.out.println("Unsupported command for backup: " + cmd);
+      printToolUsage();
+      return -1;
+    }
+
+    // enable debug logging
+    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+    if (this.cmd.hasOption(OPTION_DEBUG)) {
+      backupClientLogger.setLevel(Level.DEBUG);
+    } else {
+      backupClientLogger.setLevel(Level.INFO);
+    }
+
+    BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd);
+    if (type == BackupCommand.CREATE && conf != null) {
+      ((BackupCommands.CreateCommand) command).setConf(conf);
+    }
+    try {
+      command.execute();
+    } catch (IOException e) {
+      if (e.getMessage().equals(BackupCommands.INCORRECT_USAGE)) {
+        return -1;
+      }
+      throw e;
+    } finally {
+      command.finish();
+    }
+    return 0;
+  }
+
+  @Override
+  protected void addOptions() {
+    // define supported options
+    addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC);
+    addOptWithArg(OPTION_TABLE, OPTION_TABLE_DESC);
+    addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC);
+    addOptWithArg(OPTION_WORKERS, OPTION_WORKERS_DESC);
+    addOptWithArg(OPTION_RECORD_NUMBER, OPTION_RECORD_NUMBER_DESC);
+    addOptWithArg(OPTION_SET, OPTION_SET_DESC);
+    addOptWithArg(OPTION_PATH, OPTION_PATH_DESC);
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    this.cmd = cmd;
+  }
+
+  @Override
+  protected int doWork() throws Exception {
+    return parseAndRun(cmd.getArgs());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    Path hbasedir = FSUtils.getRootDir(conf);
+    URI defaultFs = hbasedir.getFileSystem(conf).getUri();
+    FSUtils.setFsDefault(conf, new Path(defaultFs));
+    int ret = ToolRunner.run(conf, new BackupDriver(), args);
+    System.exit(ret);
+  }
+
+  @Override
+  public int run(String[] args) throws IOException {
+    if (conf == null) {
+      LOG.error("Tool configuration is not initialized");
+      throw new NullPointerException("conf");
+    }
+
+    CommandLine cmd;
+    try {
+      // parse the command line arguments
+      cmd = parseArgs(args);
+      cmdLineArgs = args;
+    } catch (Exception e) {
+      System.err.println("Error when parsing command-line arguments: " + e.getMessage());
+      printToolUsage();
+      return EXIT_FAILURE;
+    }
+    processOptions(cmd);
+
+    int ret = EXIT_FAILURE;
+    try {
+      ret = doWork();
+    } catch (Exception e) {
+      LOG.error("Error running command-line tool", e);
+      return EXIT_FAILURE;
+    }
+    return ret;
+  }
+
+  protected void printToolUsage() throws IOException {
+    System.out.println(BackupCommands.USAGE);
+  }
+}
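
For illustration only, not part of this commit: invoking the driver programmatically, mirroring
its main() method. The first argument selects the subcommand (create, delete, history, ...);
the remaining arguments are parsed by BackupCommands, which is not shown in this hunk, so the
exact form below (full backup to a root dir with a -t table list) is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupDriver;
    import org.apache.hadoop.util.ToolRunner;

    public class RunBackupDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical arguments: create a full backup of "usertable" under hdfs:///backup.
        String[] toolArgs = { "create", "full", "hdfs:///backup", "-t", "usertable" };
        int ret = ToolRunner.run(conf, new BackupDriver(), toolArgs);
        System.exit(ret);
      }
    }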

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
new file mode 100644
index 0000000..ed554ad
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Predicate;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterables;
+
+/**
+ * Implementation of a file cleaner that checks whether an hfile is still referenced by a backup
+ * before deleting it from the hfile archive directory.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abortable {
+  private static final Log LOG = LogFactory.getLog(BackupHFileCleaner.class);
+  private boolean stopped = false;
+  private boolean aborted;
+  private Configuration conf;
+  private Connection connection;
+  private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
+      secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
+  // used by unit tests to skip reading backup:system
+  private boolean checkForFullyBackedUpTables = true;
+  private List<TableName> fullyBackedUpTables = null;
+
+  private Set<String> getFilenameFromBulkLoad(Map<byte[], List<Path>>[] maps) {
+    Set<String> filenames = new HashSet<String>();
+    for (Map<byte[], List<Path>> map : maps) {
+      if (map == null) continue;
+      for (List<Path> paths : map.values()) {
+        for (Path p : paths) {
+          filenames.add(p.getName());
+        }
+      }
+    }
+    return filenames;
+  }
+
+  private Set<String> loadHFileRefs(List<TableName> tableList) throws IOException {
+    if (connection == null) {
+      connection = ConnectionFactory.createConnection(conf);
+    }
+    try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      Map<byte[], List<Path>>[] res =
+          tbl.readBulkLoadedFiles(null, tableList);
+      secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
+      prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
+      return getFilenameFromBulkLoad(res);
+    }
+  }
+
+  @VisibleForTesting
+  void setCheckForFullyBackedUpTables(boolean b) {
+    checkForFullyBackedUpTables = b;
+  }
+  @Override
+  public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
+    if (conf == null) {
+      return files;
+    }
+    // obtain the Set of TableName's which have been fully backed up
+    // so that we filter BulkLoad to be returned from server
+    if (checkForFullyBackedUpTables) {
+      if (connection == null) return files;
+      try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
+        fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
+      } catch (IOException ioe) {
+        LOG.error("Failed to get tables which have been fully backed up, skipping checking", ioe);
+        return Collections.emptyList();
+      }
+      Collections.sort(fullyBackedUpTables);
+    }
+    final Set<String> hfileRefs;
+    try {
+      hfileRefs = loadHFileRefs(fullyBackedUpTables);
+    } catch (IOException ioe) {
+      LOG.error("Failed to read hfile references, skipping checking deletable files", ioe);
+      return Collections.emptyList();
+    }
+    Iterable<FileStatus> deletables = Iterables.filter(files, new Predicate<FileStatus>() {
+      @Override
+      public boolean apply(FileStatus file) {
+        // If the file is recent, be conservative and wait for one more scan of backup:system table
+        if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
+          return false;
+        }
+        String hfile = file.getPath().getName();
+        boolean foundHFileRef = hfileRefs.contains(hfile);
+        return !foundHFileRef;
+      }
+    });
+    return deletables;
+  }
+
+  @Override
+  public boolean isFileDeletable(FileStatus fStat) {
+    // work is done in getDeletableFiles()
+    return true;
+  }
+
+  @Override
+  public void setConf(Configuration config) {
+    this.conf = config;
+    this.connection = null;
+    try {
+      this.connection = ConnectionFactory.createConnection(conf);
+    } catch (IOException ioe) {
+      LOG.error("Couldn't establish connection", ioe);
+    }
+  }
+
+  @Override
+  public void stop(String why) {
+    if (this.stopped) {
+      return;
+    }
+    if (this.connection != null) {
+      try {
+        this.connection.close();
+      } catch (IOException ioe) {
+        LOG.debug("Got " + ioe + " when closing connection");
+      }
+    }
+    this.stopped = true;
+  }
+
+  @Override
+  public boolean isStopped() {
+    return this.stopped;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+    LOG.warn("Aborting ReplicationHFileCleaner because " + why, e);
+    this.aborted = true;
+    stop(why);
+  }
+
+  @Override
+  public boolean isAborted() {
+    return this.aborted;
+  }
+}
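
For illustration only, not part of this commit: one way the cleaner above would typically be
wired in, by appending it to the master's HFile cleaner plugin chain
(hbase.master.hfilecleaner.plugins). In practice this setting usually lives in hbase-site.xml,
and the backup code elsewhere in this patch may add the plugin automatically when backups are
enabled, so treat this as a sketch rather than required configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupHFileCleaner;

    public class RegisterBackupHFileCleaner {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        String key = "hbase.master.hfilecleaner.plugins";
        String existing = conf.get(key, "");
        // Keep any cleaners already configured and add the backup-aware one.
        String plugins = existing.isEmpty()
            ? BackupHFileCleaner.class.getName()
            : existing + "," + BackupHFileCleaner.class.getName();
        conf.set(key, plugins);
        return conf;
      }
    }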

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
new file mode 100644
index 0000000..1765bf3
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
@@ -0,0 +1,550 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * An object to encapsulate the information for each backup session
+ */
+@InterfaceAudience.Private
+public class BackupInfo implements Comparable<BackupInfo> {
+  private static final Log LOG = LogFactory.getLog(BackupInfo.class);
+
+  public static interface Filter {
+
+    /**
+     * Filter interface
+     * @param info backup info
+     * @return true if info passes filter, false otherwise
+     */
+    public boolean apply(BackupInfo info);
+  }
+
+  /**
+   * Backup session states
+   */
+  public static enum BackupState {
+    RUNNING, COMPLETE, FAILED, ANY;
+  }
+
+  /**
+   * BackupPhase - phases of an active backup session, i.e. while the state of the backup session
+   * is BackupState.RUNNING
+   */
+  public static enum BackupPhase {
+    REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
+  }
+
+  /**
+   * Backup id
+   */
+  private String backupId;
+
+  /**
+   * Backup type, full or incremental
+   */
+  private BackupType type;
+
+  /**
+   * Target root directory for storing the backup files
+   */
+  private String backupRootDir;
+
+  /**
+   * Backup state
+   */
+  private BackupState state;
+
+  /**
+   * Backup phase
+   */
+  private BackupPhase phase = BackupPhase.REQUEST;
+
+  /**
+   * Backup failure message
+   */
+  private String failedMsg;
+
+  /**
+   * Backup status map for all tables
+   */
+  private Map<TableName, BackupTableInfo> backupTableInfoMap;
+
+  /**
+   * Actual start timestamp of a backup process
+   */
+  private long startTs;
+
+  /**
+   * Actual end timestamp of the backup process
+   */
+  private long completeTs;
+
+  /**
+   * Total bytes of incremental logs copied
+   */
+  private long totalBytesCopied;
+
+  /**
+   * For incremental backup, the location of the backed-up hlogs
+   */
+  private String hlogTargetDir = null;
+
+  /**
+   * Incremental backup file list
+   */
+  private List<String> incrBackupFileList;
+
+  /**
+   * New region server log timestamps for the table set after a distributed log roll.
+   * Key - table name, value - map of RegionServer hostname -> last rolled log timestamp
+   */
+  private HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap;
+
+  /**
+   * Backup progress in % (0-100)
+   */
+  private int progress;
+
+  /**
+   * Number of parallel workers. -1 - system defined
+   */
+  private int workers = -1;
+
+  /**
+   * Bandwidth per worker in MB per sec. -1 - unlimited
+   */
+  private long bandwidth = -1;
+
+  public BackupInfo() {
+    backupTableInfoMap = new HashMap<TableName, BackupTableInfo>();
+  }
+
+  public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) {
+    this();
+    this.backupId = backupId;
+    this.type = type;
+    this.backupRootDir = targetRootDir;
+    this.addTables(tables);
+    if (type == BackupType.INCREMENTAL) {
+      setHLogTargetDir(BackupUtils.getLogBackupDir(targetRootDir, backupId));
+    }
+    this.startTs = 0;
+    this.completeTs = 0;
+  }
+
+  public int getWorkers() {
+    return workers;
+  }
+
+  public void setWorkers(int workers) {
+    this.workers = workers;
+  }
+
+  public long getBandwidth() {
+    return bandwidth;
+  }
+
+  public void setBandwidth(long bandwidth) {
+    this.bandwidth = bandwidth;
+  }
+
+  public void setBackupTableInfoMap(Map<TableName, BackupTableInfo> backupTableInfoMap) {
+    this.backupTableInfoMap = backupTableInfoMap;
+  }
+
+  public HashMap<TableName, HashMap<String, Long>> getTableSetTimestampMap() {
+    return tableSetTimestampMap;
+  }
+
+  public void setTableSetTimestampMap(HashMap<TableName,
+                                      HashMap<String, Long>> tableSetTimestampMap) {
+    this.tableSetTimestampMap = tableSetTimestampMap;
+  }
+
+  public void setType(BackupType type) {
+    this.type = type;
+  }
+
+  public void setBackupRootDir(String targetRootDir) {
+    this.backupRootDir = targetRootDir;
+  }
+
+  public void setTotalBytesCopied(long totalBytesCopied) {
+    this.totalBytesCopied = totalBytesCopied;
+  }
+
+  /**
+   * Set progress (0-100%)
+   * @param p progress value
+   */
+
+  public void setProgress(int p) {
+    this.progress = p;
+  }
+
+  /**
+   * Get current progress
+   */
+  public int getProgress() {
+    return progress;
+  }
+
+  public String getBackupId() {
+    return backupId;
+  }
+
+  public void setBackupId(String backupId) {
+    this.backupId = backupId;
+  }
+
+  public BackupTableInfo getBackupTableInfo(TableName table) {
+    return this.backupTableInfoMap.get(table);
+  }
+
+  public String getFailedMsg() {
+    return failedMsg;
+  }
+
+  public void setFailedMsg(String failedMsg) {
+    this.failedMsg = failedMsg;
+  }
+
+  public long getStartTs() {
+    return startTs;
+  }
+
+  public void setStartTs(long startTs) {
+    this.startTs = startTs;
+  }
+
+  public long getCompleteTs() {
+    return completeTs;
+  }
+
+  public void setCompleteTs(long endTs) {
+    this.completeTs = endTs;
+  }
+
+  public long getTotalBytesCopied() {
+    return totalBytesCopied;
+  }
+
+  public BackupState getState() {
+    return state;
+  }
+
+  public void setState(BackupState flag) {
+    this.state = flag;
+  }
+
+  public BackupPhase getPhase() {
+    return phase;
+  }
+
+  public void setPhase(BackupPhase phase) {
+    this.phase = phase;
+  }
+
+  public BackupType getType() {
+    return type;
+  }
+
+  public void setSnapshotName(TableName table, String snapshotName) {
+    this.backupTableInfoMap.get(table).setSnapshotName(snapshotName);
+  }
+
+  public String getSnapshotName(TableName table) {
+    return this.backupTableInfoMap.get(table).getSnapshotName();
+  }
+
+  public List<String> getSnapshotNames() {
+    List<String> snapshotNames = new ArrayList<String>();
+    for (BackupTableInfo backupStatus : this.backupTableInfoMap.values()) {
+      snapshotNames.add(backupStatus.getSnapshotName());
+    }
+    return snapshotNames;
+  }
+
+  public Set<TableName> getTables() {
+    return this.backupTableInfoMap.keySet();
+  }
+
+  public List<TableName> getTableNames() {
+    return new ArrayList<TableName>(backupTableInfoMap.keySet());
+  }
+
+  public void addTables(TableName[] tables) {
+    for (TableName table : tables) {
+      BackupTableInfo backupStatus = new BackupTableInfo(table, this.backupRootDir, this.backupId);
+      this.backupTableInfoMap.put(table, backupStatus);
+    }
+  }
+
+  public void setTables(List<TableName> tables) {
+    this.backupTableInfoMap.clear();
+    for (TableName table : tables) {
+      BackupTableInfo backupStatus = new BackupTableInfo(table, this.backupRootDir, this.backupId);
+      this.backupTableInfoMap.put(table, backupStatus);
+    }
+  }
+
+  public String getBackupRootDir() {
+    return backupRootDir;
+  }
+
+  public String getTableBackupDir(TableName tableName) {
+    return BackupUtils.getTableBackupDir(backupRootDir, backupId, tableName);
+  }
+
+  public void setHLogTargetDir(String hlogTargetDir) {
+    this.hlogTargetDir = hlogTargetDir;
+  }
+
+  public String getHLogTargetDir() {
+    return hlogTargetDir;
+  }
+
+  public List<String> getIncrBackupFileList() {
+    return incrBackupFileList;
+  }
+
+  public void setIncrBackupFileList(List<String> incrBackupFileList) {
+    this.incrBackupFileList = incrBackupFileList;
+  }
+
+  /**
+   * Set the new region server log timestamps after distributed log roll
+   * @param newTableSetTimestampMap table timestamp map
+   */
+  public void setIncrTimestampMap(
+      HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap) {
+    this.tableSetTimestampMap = newTableSetTimestampMap;
+  }
+
+  /**
+   * Get new region server log timestamps after distributed log roll
+   * @return new region server log timestamps
+   */
+  public HashMap<TableName, HashMap<String, Long>> getIncrTimestampMap() {
+    return this.tableSetTimestampMap;
+  }
+
+  public TableName getTableBySnapshot(String snapshotName) {
+    for (Entry<TableName, BackupTableInfo> entry : this.backupTableInfoMap.entrySet()) {
+      if (snapshotName.equals(entry.getValue().getSnapshotName())) {
+        return entry.getKey();
+      }
+    }
+    return null;
+  }
+
+  public BackupProtos.BackupInfo toProtosBackupInfo() {
+    BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder();
+    builder.setBackupId(getBackupId());
+    setBackupTableInfoMap(builder);
+    builder.setCompleteTs(getCompleteTs());
+    if (getFailedMsg() != null) {
+      builder.setFailedMessage(getFailedMsg());
+    }
+    if (getState() != null) {
+      builder.setBackupState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name()));
+    }
+    if (getPhase() != null) {
+      builder.setBackupPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name()));
+    }
+
+    builder.setProgress(getProgress());
+    builder.setStartTs(getStartTs());
+    builder.setBackupRootDir(getBackupRootDir());
+    builder.setBackupType(BackupProtos.BackupType.valueOf(getType().name()));
+    builder.setWorkersNumber(workers);
+    builder.setBandwidth(bandwidth);
+    return builder.build();
+  }
+
+  @Override
+  public int hashCode() {
+    int hash = 33 * type.hashCode() + (backupId != null ? backupId.hashCode() : 0);
+    if (backupRootDir != null) {
+      hash = 33 * hash + backupRootDir.hashCode();
+    }
+    hash = 33 * hash + state.hashCode();
+    hash = 33 * hash + phase.hashCode();
+    hash = 33 * hash + (int) (startTs ^ (startTs >>> 32));
+    hash = 33 * hash + (int) (completeTs ^ (completeTs >>> 32));
+    hash = 33 * hash + (int) (totalBytesCopied ^ (totalBytesCopied >>> 32));
+    if (hlogTargetDir != null) {
+      hash = 33 * hash + hlogTargetDir.hashCode();
+    }
+    return hash;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof BackupInfo) {
+      BackupInfo other = (BackupInfo) obj;
+      try {
+        return Bytes.equals(toByteArray(), other.toByteArray());
+      } catch (IOException e) {
+        LOG.error(e);
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public String toString() {
+    return backupId;
+  }
+
+  public byte[] toByteArray() throws IOException {
+    return toProtosBackupInfo().toByteArray();
+  }
+
+  private void setBackupTableInfoMap(Builder builder) {
+    for (Entry<TableName, BackupTableInfo> entry : backupTableInfoMap.entrySet()) {
+      builder.addBackupTableInfo(entry.getValue().toProto());
+    }
+  }
+
+  public static BackupInfo fromByteArray(byte[] data) throws IOException {
+    return fromProto(BackupProtos.BackupInfo.parseFrom(data));
+  }
+
+  public static BackupInfo fromStream(final InputStream stream) throws IOException {
+    return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream));
+  }
+
+  public static BackupInfo fromProto(BackupProtos.BackupInfo proto) {
+    BackupInfo context = new BackupInfo();
+    context.setBackupId(proto.getBackupId());
+    context.setBackupTableInfoMap(toMap(proto.getBackupTableInfoList()));
+    context.setCompleteTs(proto.getCompleteTs());
+    if (proto.hasFailedMessage()) {
+      context.setFailedMsg(proto.getFailedMessage());
+    }
+    if (proto.hasBackupState()) {
+      context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name()));
+    }
+
+    context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(),
+      proto.getBackupId()));
+
+    if (proto.hasBackupPhase()) {
+      context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name()));
+    }
+    if (proto.hasProgress()) {
+      context.setProgress(proto.getProgress());
+    }
+    context.setStartTs(proto.getStartTs());
+    context.setBackupRootDir(proto.getBackupRootDir());
+    context.setType(BackupType.valueOf(proto.getBackupType().name()));
+    context.setWorkers(proto.getWorkersNumber());
+    context.setBandwidth(proto.getBandwidth());
+    return context;
+  }
+
+  private static Map<TableName, BackupTableInfo> toMap(List<BackupProtos.BackupTableInfo> list) {
+    HashMap<TableName, BackupTableInfo> map = new HashMap<>();
+    for (BackupProtos.BackupTableInfo tbs : list) {
+      map.put(ProtobufUtil.toTableName(tbs.getTableName()), BackupTableInfo.convert(tbs));
+    }
+    return map;
+  }
+
+  public String getShortDescription() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("{");
+    sb.append("ID=" + backupId).append(",");
+    sb.append("Type=" + getType()).append(",");
+    sb.append("Tables=" + getTableListAsString()).append(",");
+    sb.append("State=" + getState()).append(",");
+    Date date = null;
+    Calendar cal = Calendar.getInstance();
+    cal.setTimeInMillis(getStartTs());
+    date = cal.getTime();
+    sb.append("Start time=" + date).append(",");
+    if (state == BackupState.FAILED) {
+      sb.append("Failed message=" + getFailedMsg()).append(",");
+    } else if (state == BackupState.RUNNING) {
+      sb.append("Phase=" + getPhase()).append(",");
+    } else if (state == BackupState.COMPLETE) {
+      cal = Calendar.getInstance();
+      cal.setTimeInMillis(getCompleteTs());
+      date = cal.getTime();
+      sb.append("End time=" + date).append(",");
+    }
+    sb.append("Progress=" + getProgress() + "%");
+    sb.append("}");
+
+    return sb.toString();
+  }
+
+  public String getStatusAndProgressAsString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("id: ").append(getBackupId()).append(" state: ").append(getState())
+        .append(" progress: ").append(getProgress());
+    return sb.toString();
+  }
+
+  public String getTableListAsString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("{");
+    sb.append(StringUtils.join(backupTableInfoMap.keySet(), ","));
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Only the timestamp encoded in the backup id suffix is used to compare objects during sorting.
+   */
+  @Override
+  public int compareTo(BackupInfo o) {
+    Long thisTS =
+        Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
+    Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
+    return thisTS.compareTo(otherTS);
+  }
+
+}
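
A minimal round-trip sketch for the protobuf serialization methods above, assuming an already
populated BackupInfo named "info" and a caller that handles the declared IOException; names are
illustrative only:

    // Serialize the backup state to protobuf bytes (e.g. for persisting it)...
    byte[] bytes = info.toByteArray();
    // ...and rebuild an equivalent BackupInfo from those bytes.
    BackupInfo restored = BackupInfo.fromByteArray(bytes);
    assert restored.getBackupId().equals(info.getBackupId());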

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
new file mode 100644
index 0000000..136782f
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Backup merge operation job interface. The concrete implementation is provided by the backup
+ * provider; see {@link BackupRestoreFactory}.
+ */
+
+@InterfaceAudience.Private
+public interface BackupMergeJob extends Configurable {
+
+  /**
+   * Run the backup merge operation.
+   * @param backupIds backup image ids to merge
+   * @throws IOException if the merge operation fails
+   */
+  void run(String[] backupIds) throws IOException;
+}
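
A hedged usage sketch: callers obtain the merge job through BackupRestoreFactory rather than
instantiating an implementation directly. The "conf" and "backupIds" values are illustrative
placeholders:

    Configuration conf = HBaseConfiguration.create();
    // Resolves to MapReduceBackupMergeJob unless overridden in the configuration.
    BackupMergeJob mergeJob = BackupRestoreFactory.getBackupMergeJob(conf);
    // Merge the listed backup images (ids are illustrative only).
    String[] backupIds = new String[] { "backup_1", "backup_2" };
    mergeJob.run(backupIds);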

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java
new file mode 100644
index 0000000..595e862
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * An Observer to facilitate backup operations
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class BackupObserver implements RegionObserver {
+  private static final Log LOG = LogFactory.getLog(BackupObserver.class);
+  @Override
+  public boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
+    List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths,
+    boolean hasLoaded) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (!hasLoaded) {
+      // there is no need to record state
+      return hasLoaded;
+    }
+    if (finalPaths == null || !BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("skipping recording bulk load in postBulkLoadHFile since backup is disabled");
+      return hasLoaded;
+    }
+    try (Connection connection = ConnectionFactory.createConnection(cfg);
+        BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
+      HRegionInfo info = ctx.getEnvironment().getRegionInfo();
+      TableName tableName = info.getTable();
+      if (!fullyBackedUpTables.contains(tableName)) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(tableName + " has not gone thru full backup");
+        }
+        return hasLoaded;
+      }
+      tbl.writePathsPostBulkLoad(tableName, info.getEncodedNameAsBytes(), finalPaths);
+      return hasLoaded;
+    } catch (IOException ioe) {
+      LOG.error("Failed to get tables which have been fully backed up", ioe);
+      return false;
+    }
+  }
+  @Override
+  public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
+      final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled");
+      return;
+    }
+    try (Connection connection = ConnectionFactory.createConnection(cfg);
+        BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
+      HRegionInfo info = ctx.getEnvironment().getRegionInfo();
+      TableName tableName = info.getTable();
+      if (!fullyBackedUpTables.contains(tableName)) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(tableName + " has not gone thru full backup");
+        }
+        return;
+      }
+      tbl.writeFilesForBulkLoadPreCommit(tableName, info.getEncodedNameAsBytes(), family, pairs);
+      return;
+    }
+  }
+}
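
The observer only records bulk loads when it is installed as a region coprocessor and backup is
enabled. A hedged configuration sketch follows; the property keys are standard HBase keys, but the
wiring shown here is illustrative rather than how the backup module registers the observer itself:

    Configuration conf = HBaseConfiguration.create();
    // Backup must be enabled, otherwise the hooks above return early.
    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    // Load the observer on region servers via the standard region coprocessor property.
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BackupObserver.class.getName());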

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java
new file mode 100644
index 0000000..dadd861
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * POJO class for backup request
+ */
+@InterfaceAudience.Private
+public final class BackupRequest {
+
+  public static class Builder {
+
+    BackupRequest request;
+
+    public Builder() {
+      request = new BackupRequest();
+    }
+
+    public Builder withBackupType(BackupType type) {
+      request.setBackupType(type);
+      return this;
+    }
+
+    public Builder withTableList(List<TableName> tables) {
+      request.setTableList(tables);
+      return this;
+    }
+
+    public Builder withTargetRootDir(String backupDir) {
+      request.setTargetRootDir(backupDir);
+      return this;
+    }
+
+    public Builder withBackupSetName(String setName) {
+      request.setBackupSetName(setName);
+      return this;
+    }
+
+    public Builder withTotalTasks(int numTasks) {
+      request.setTotalTasks(numTasks);
+      return this;
+    }
+
+    public Builder withBandwidthPerTasks(int bandwidth) {
+      request.setBandwidth(bandwidth);
+      return this;
+    }
+
+    public BackupRequest build() {
+      return request;
+    }
+
+  }
+
+  private BackupType type;
+  private List<TableName> tableList;
+  private String targetRootDir;
+  private int totalTasks = -1;
+  private long bandwidth = -1L;
+  private String backupSetName;
+
+  private BackupRequest() {
+  }
+
+  private BackupRequest setBackupType(BackupType type) {
+    this.type = type;
+    return this;
+  }
+
+  public BackupType getBackupType() {
+    return this.type;
+  }
+
+  private BackupRequest setTableList(List<TableName> tableList) {
+    this.tableList = tableList;
+    return this;
+  }
+
+  public List<TableName> getTableList() {
+    return this.tableList;
+  }
+
+  private BackupRequest setTargetRootDir(String targetRootDir) {
+    this.targetRootDir = targetRootDir;
+    return this;
+  }
+
+  public String getTargetRootDir() {
+    return this.targetRootDir;
+  }
+
+  private BackupRequest setTotalTasks(int totalTasks) {
+    this.totalTasks = totalTasks;
+    return this;
+  }
+
+  public int getTotalTasks() {
+    return this.totalTasks;
+  }
+
+  private BackupRequest setBandwidth(long bandwidth) {
+    this.bandwidth = bandwidth;
+    return this;
+  }
+
+  public long getBandwidth() {
+    return this.bandwidth;
+  }
+
+  public String getBackupSetName() {
+    return backupSetName;
+  }
+
+  private BackupRequest setBackupSetName(String backupSetName) {
+    this.backupSetName = backupSetName;
+    return this;
+  }
+}
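
A short sketch of the fluent Builder above; the table names, destination directory, and task
counts are illustrative values only:

    List<TableName> tables = Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
    BackupRequest request = new BackupRequest.Builder()
        .withBackupType(BackupType.FULL)
        .withTableList(tables)
        .withTargetRootDir("hdfs://backup/root")   // illustrative destination
        .withTotalTasks(4)
        .withBandwidthPerTasks(100)                // MB/s per task
        .build();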

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
new file mode 100644
index 0000000..48e70a1
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * BackupRestoreConstants holds the constants used by HBase backup and restore.
+ */
+@InterfaceAudience.Private
+public interface BackupRestoreConstants {
+  /*
+   * Backup/Restore constants
+   */
+  public final static String BACKUP_SYSTEM_TABLE_NAME_KEY = "hbase.backup.system.table.name";
+  public final static String BACKUP_SYSTEM_TABLE_NAME_DEFAULT = "backup:system";
+
+  public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
+
+  public final static int BACKUP_SYSTEM_TTL_DEFAULT = HConstants.FOREVER;
+  public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable";
+  public final static boolean BACKUP_ENABLE_DEFAULT = false;
+
+  public static final String BACKUP_MAX_ATTEMPTS_KEY = "hbase.backup.attempts.max";
+  public static final int DEFAULT_BACKUP_MAX_ATTEMPTS = 10;
+
+  public static final String BACKUP_ATTEMPTS_PAUSE_MS_KEY = "hbase.backup.attempts.pause.ms";
+  public static final int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000;
+
+  /*
+   *  Drivers option list
+   */
+  public static final String OPTION_OVERWRITE = "o";
+  public static final String OPTION_OVERWRITE_DESC =
+      "Overwrite data if any of the restore target tables exists";
+
+  public static final String OPTION_CHECK = "c";
+  public static final String OPTION_CHECK_DESC =
+      "Check restore sequence and dependencies only (does not execute the command)";
+
+  public static final String OPTION_SET = "s";
+  public static final String OPTION_SET_DESC = "Backup set name";
+  public static final String OPTION_SET_RESTORE_DESC =
+      "Backup set to restore, mutually exclusive with -t (table list)";
+  public static final String OPTION_SET_BACKUP_DESC =
+      "Backup set to backup, mutually exclusive with -t (table list)";
+  public static final String OPTION_DEBUG = "d";
+  public static final String OPTION_DEBUG_DESC = "Enable debug logging";
+
+  public static final String OPTION_TABLE = "t";
+  public static final String OPTION_TABLE_DESC = "Table name. If specified, only backup images"
+      + " that contain this table will be listed.";
+
+  public static final String OPTION_TABLE_LIST = "l";
+  public static final String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
+
+  public static final String OPTION_BANDWIDTH = "b";
+  public static final String OPTION_BANDWIDTH_DESC = "Bandwidth per task (MapReduce task) in MB/s";
+
+  public static final String OPTION_WORKERS = "w";
+  public static final String OPTION_WORKERS_DESC = "Number of parallel MapReduce tasks to execute";
+
+  public static final String OPTION_RECORD_NUMBER = "n";
+  public static final String OPTION_RECORD_NUMBER_DESC =
+      "Number of backup history records to show. Default: 10";
+
+  public static final String OPTION_PATH = "p";
+  public static final String OPTION_PATH_DESC = "Backup destination root directory path";
+
+  public static final String OPTION_TABLE_MAPPING = "m";
+  public static final String OPTION_TABLE_MAPPING_DESC =
+      "A comma-separated list of target tables. "
+          + "If specified, each table in <tables> must have a mapping";
+
+  public static final String BACKUP_CONFIG_STRING = BACKUP_ENABLE_KEY + "=true\n"
+      + "hbase.master.logcleaner.plugins="
+      + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+      + "hbase.procedure.master.classes=YOUR_CLASSES,"
+      + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+      + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+      + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+      + "and restart the cluster";
+  public static final String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "
+      + "set the following in hbase-site.xml:\n " + BACKUP_CONFIG_STRING;
+
+  public static final String VERIFY_BACKUP =
+      "Please make sure that backup is enabled on the cluster. To enable backup, "
+          + "set the following in hbase-site.xml:\n " + BACKUP_CONFIG_STRING;
+
+  /*
+   *  Delimiter in table name list in restore command
+   */
+  public static final String TABLENAME_DELIMITER_IN_COMMAND = ",";
+
+  public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root";
+
+  public static final String BACKUPID_PREFIX = "backup_";
+
+  public static enum BackupCommand {
+    CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS,
+    SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR
+  }
+
+}
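
A minimal sketch of consulting these constants against a live Configuration to test whether backup
is enabled; variable names are illustrative:

    Configuration conf = HBaseConfiguration.create();
    boolean enabled = conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
        BackupRestoreConstants.BACKUP_ENABLE_DEFAULT);
    if (!enabled) {
      System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
    }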

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
new file mode 100644
index 0000000..d72c884
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreJob;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Factory implementation for backup/restore related jobs.
+ */
+@InterfaceAudience.Private
+public final class BackupRestoreFactory {
+
+  public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
+  public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
+  public final static String HBASE_BACKUP_MERGE_IMPL_CLASS = "hbase.backup.merge.class";
+
+  private BackupRestoreFactory() {
+    throw new AssertionError("Instantiating utility class...");
+  }
+
+  /**
+   * Gets backup restore job
+   * @param conf configuration
+   * @return backup restore job instance
+   */
+  public static RestoreJob getRestoreJob(Configuration conf) {
+    Class<? extends RestoreJob> cls =
+        conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreJob.class, RestoreJob.class);
+    RestoreJob service = ReflectionUtils.newInstance(cls, conf);
+    service.setConf(conf);
+    return service;
+  }
+
+  /**
+   * Gets backup copy job
+   * @param conf configuration
+   * @return backup copy job instance
+   */
+  public static BackupCopyJob getBackupCopyJob(Configuration conf) {
+    Class<? extends BackupCopyJob> cls =
+        conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class,
+          BackupCopyJob.class);
+    BackupCopyJob service = ReflectionUtils.newInstance(cls, conf);
+    service.setConf(conf);
+    return service;
+  }
+
+  /**
+   * Gets backup merge job
+   * @param conf configuration
+   * @return backup merge job instance
+   */
+  public static BackupMergeJob getBackupMergeJob(Configuration conf) {
+    Class<? extends BackupMergeJob> cls =
+        conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class,
+          BackupMergeJob.class);
+    BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
+    service.setConf(conf);
+    return service;
+  }
+}
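
A hedged sketch of how the class-name keys above allow an alternative implementation to be plugged
in; MyRestoreJob is a hypothetical class implementing RestoreJob, named here only for illustration:

    Configuration conf = HBaseConfiguration.create();
    // Hypothetical custom implementation; MapReduceRestoreJob is used when nothing is set.
    conf.setClass(BackupRestoreFactory.HBASE_INCR_RESTORE_IMPL_CLASS,
        MyRestoreJob.class, RestoreJob.class);
    RestoreJob restoreJob = BackupRestoreFactory.getRestoreJob(conf);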

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
new file mode 100644
index 0000000..a5390a6
--- /dev/null
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+
+/**
+ * Backup-related information for a single table. At the moment only the table name and the
+ * snapshot name are kept here.
+ */
+@InterfaceAudience.Private
+public class BackupTableInfo {
+
+  /*
+   *  Table name for backup
+   */
+  private TableName table;
+
+  /*
+   *  Snapshot name for offline/online snapshot
+   */
+
+  private String snapshotName = null;
+
+  public BackupTableInfo() {
+  }
+
+  public BackupTableInfo(TableName table, String targetRootDir, String backupId) {
+    this.table = table;
+  }
+
+  public String getSnapshotName() {
+    return snapshotName;
+  }
+
+  public void setSnapshotName(String snapshotName) {
+    this.snapshotName = snapshotName;
+  }
+
+  public TableName getTable() {
+    return table;
+  }
+
+  public static BackupTableInfo convert(BackupProtos.BackupTableInfo proto) {
+    BackupTableInfo bs = new BackupTableInfo();
+    bs.table = ProtobufUtil.toTableName(proto.getTableName());
+    if (proto.hasSnapshotName()) {
+      bs.snapshotName = proto.getSnapshotName();
+    }
+    return bs;
+  }
+
+  public BackupProtos.BackupTableInfo toProto() {
+    BackupProtos.BackupTableInfo.Builder builder = BackupProtos.BackupTableInfo.newBuilder();
+    if (snapshotName != null) {
+      builder.setSnapshotName(snapshotName);
+    }
+    builder.setTableName(ProtobufUtil.toProtoTableName(table));
+    return builder.build();
+  }
+}
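
A small round-trip sketch for the proto conversion above; the table, directory, and snapshot names
are illustrative:

    BackupTableInfo info = new BackupTableInfo(TableName.valueOf("t1"), "/backup/root", "backup_1");
    info.setSnapshotName("snapshot_t1");
    BackupProtos.BackupTableInfo proto = info.toProto();
    BackupTableInfo copy = BackupTableInfo.convert(proto);
    assert copy.getTable().equals(info.getTable());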