Posted to commits@hbase.apache.org by ja...@apache.org on 2018/01/28 12:51:54 UTC

[1/3] hbase git commit: HBASE-19765 Fixed Checkstyle errors in hbase-backup

Repository: hbase
Updated Branches:
  refs/heads/master f1502a3ac -> c2236b77c


http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index 9a2825e..18548f5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -84,14 +84,12 @@ public final class BackupUtils {
    */
   public static HashMap<String, Long> getRSLogTimestampMins(
       HashMap<TableName, HashMap<String, Long>> rsLogTimestampMap) {
-
     if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
       return null;
     }
 
-    HashMap<String, Long> rsLogTimestampMins = new HashMap<String, Long>();
-    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS =
-        new HashMap<String, HashMap<TableName, Long>>();
+    HashMap<String, Long> rsLogTimestampMins = new HashMap<>();
+    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS = new HashMap<>();
 
     for (Entry<TableName, HashMap<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
       TableName table = tableEntry.getKey();
@@ -100,7 +98,7 @@ public final class BackupUtils {
         String rs = rsEntry.getKey();
         Long ts = rsEntry.getValue();
         if (!rsLogTimestampMapByRS.containsKey(rs)) {
-          rsLogTimestampMapByRS.put(rs, new HashMap<TableName, Long>());
+          rsLogTimestampMapByRS.put(rs, new HashMap<>());
           rsLogTimestampMapByRS.get(rs).put(table, ts);
         } else {
           rsLogTimestampMapByRS.get(rs).put(table, ts);
@@ -123,18 +121,15 @@ public final class BackupUtils {
    * @param backupInfo backup info
    * @param conf configuration
    * @throws IOException exception
-   * @throws InterruptedException exception
    */
-  public static void
-      copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
-          throws IOException, InterruptedException {
+  public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
+          throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
 
     // for each table in the table set, copy out the table info and region
     // info files in the correct directory structure
     for (TableName table : backupInfo.getTables()) {
-
       if (!MetaTableAccessor.tableExists(conn, table)) {
         LOG.warn("Table " + table + " does not exists, skipping it.");
         continue;
@@ -150,8 +145,7 @@ public final class BackupUtils {
       LOG.debug("Attempting to copy table info for:" + table + " target: " + target
           + " descriptor: " + orig);
       LOG.debug("Finished copying tableinfo.");
-      List<RegionInfo> regions = null;
-      regions = MetaTableAccessor.getTableRegions(conn, table);
+      List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
       // For each region, write the region info to disk
       LOG.debug("Starting to write region info for table " + table);
       for (RegionInfo regionInfo : regions) {
@@ -210,10 +204,8 @@ public final class BackupUtils {
    * Returns WAL file name
    * @param walFileName WAL file name
    * @return WAL file name
-   * @throws IOException exception
-   * @throws IllegalArgumentException exception
    */
-  public static String getUniqueWALFileNamePart(String walFileName) throws IOException {
+  public static String getUniqueWALFileNamePart(String walFileName) {
     return getUniqueWALFileNamePart(new Path(walFileName));
   }
 
@@ -221,9 +213,8 @@ public final class BackupUtils {
    * Returns WAL file name
    * @param p WAL file path
    * @return WAL file name
-   * @throws IOException exception
    */
-  public static String getUniqueWALFileNamePart(Path p) throws IOException {
+  public static String getUniqueWALFileNamePart(Path p) {
     return p.getName();
   }
 
@@ -261,27 +252,23 @@ public final class BackupUtils {
     Path rootDir = FSUtils.getRootDir(c);
     Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
     Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    List<String> logFiles = new ArrayList<String>();
-
-    PathFilter filter = new PathFilter() {
+    List<String> logFiles = new ArrayList<>();
 
-      @Override
-      public boolean accept(Path p) {
-        try {
-          if (AbstractFSWALProvider.isMetaFile(p)) {
-            return false;
-          }
-          String host = parseHostNameFromLogFile(p);
-          if (host == null) {
-            return false;
-          }
-          Long oldTimestamp = hostTimestampMap.get(host);
-          Long currentLogTS = BackupUtils.getCreationTime(p);
-          return currentLogTS <= oldTimestamp;
-        } catch (Exception e) {
-          LOG.warn("Can not parse" + p, e);
+    PathFilter filter = p -> {
+      try {
+        if (AbstractFSWALProvider.isMetaFile(p)) {
+          return false;
+        }
+        String host = parseHostNameFromLogFile(p);
+        if (host == null) {
           return false;
         }
+        Long oldTimestamp = hostTimestampMap.get(host);
+        Long currentLogTS = BackupUtils.getCreationTime(p);
+        return currentLogTS <= oldTimestamp;
+      } catch (Exception e) {
+        LOG.warn("Can not parse" + p, e);
+        return false;
       }
     };
     FileSystem fs = FileSystem.get(c);
@@ -331,7 +318,7 @@ public final class BackupUtils {
    * @throws IOException exception
    */
   public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
-    boolean targetExists = false;
+    boolean targetExists;
     try {
       targetExists = checkPathExist(backupRootPath, conf);
     } catch (IOException e) {
@@ -363,7 +350,7 @@ public final class BackupUtils {
   public static <T> Long getMinValue(HashMap<T, Long> map) {
     Long minTimestamp = null;
     if (map != null) {
-      ArrayList<Long> timestampList = new ArrayList<Long>(map.values());
+      ArrayList<Long> timestampList = new ArrayList<>(map.values());
       Collections.sort(timestampList);
       // The min among all the RS log timestamps will be kept in backup system table table.
       minTimestamp = timestampList.get(0);
@@ -375,7 +362,6 @@ public final class BackupUtils {
    * Parses host name:port from archived WAL path
    * @param p path
    * @return host name
-   * @throws IOException exception
    */
   public static String parseHostFromOldLog(Path p) {
     try {
@@ -405,7 +391,7 @@ public final class BackupUtils {
   }
 
   public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
-      PathFilter filter) throws FileNotFoundException, IOException {
+      PathFilter filter) throws IOException {
     RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
 
     while (it.hasNext()) {
@@ -433,7 +419,6 @@ public final class BackupUtils {
    * @throws IOException exception
    */
   private static void cleanupHLogDir(BackupInfo backupInfo, Configuration conf) throws IOException {
-
     String logDir = backupInfo.getHLogTargetDir();
     if (logDir == null) {
       LOG.warn("No log directory specified for " + backupInfo.getBackupId());
@@ -497,8 +482,8 @@ public final class BackupUtils {
    * @param tableName table name
    * @return backupPath String for the particular table
    */
-  public static String
-      getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
+  public static String getTableBackupDir(String backupRootDir, String backupId,
+          TableName tableName) {
     return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
         + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
         + Path.SEPARATOR;
@@ -510,8 +495,8 @@ public final class BackupUtils {
    * @return sorted list of BackupCompleteData
    */
   public static ArrayList<BackupInfo> sortHistoryListDesc(ArrayList<BackupInfo> historyList) {
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
-    TreeMap<String, BackupInfo> map = new TreeMap<String, BackupInfo>();
+    ArrayList<BackupInfo> list = new ArrayList<>();
+    TreeMap<String, BackupInfo> map = new TreeMap<>();
     for (BackupInfo h : historyList) {
       map.put(Long.toString(h.getStartTs()), h);
     }
@@ -531,8 +516,8 @@ public final class BackupUtils {
    * @param filter path filter
    * @return null if dir is empty or doesn't exist, otherwise FileStatus array
    */
-  public static FileStatus[]
-      listStatus(final FileSystem fs, final Path dir, final PathFilter filter) throws IOException {
+  public static FileStatus[] listStatus(final FileSystem fs, final Path dir,
+          final PathFilter filter) throws IOException {
     FileStatus[] status = null;
     try {
       status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
@@ -542,7 +527,11 @@ public final class BackupUtils {
         LOG.trace(dir + " doesn't exist");
       }
     }
-    if (status == null || status.length < 1) return null;
+
+    if (status == null || status.length < 1) {
+      return null;
+    }
+
     return status;
   }
 
@@ -577,10 +566,14 @@ public final class BackupUtils {
     FileSystem fs = FileSystem.get(conf);
     RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(backupRootPath);
 
-    List<BackupInfo> infos = new ArrayList<BackupInfo>();
+    List<BackupInfo> infos = new ArrayList<>();
     while (it.hasNext()) {
       LocatedFileStatus lfs = it.next();
-      if (!lfs.isDirectory()) continue;
+
+      if (!lfs.isDirectory()) {
+        continue;
+      }
+
       String backupId = lfs.getPath().getName();
       try {
         BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs);
@@ -591,12 +584,15 @@ public final class BackupUtils {
     }
     // Sort
     Collections.sort(infos, new Comparator<BackupInfo>() {
-
       @Override
       public int compare(BackupInfo o1, BackupInfo o2) {
         long ts1 = getTimestamp(o1.getBackupId());
         long ts2 = getTimestamp(o2.getBackupId());
-        if (ts1 == ts2) return 0;
+
+        if (ts1 == ts2) {
+          return 0;
+        }
+
         return ts1 < ts2 ? 1 : -1;
       }
 
@@ -611,7 +607,7 @@ public final class BackupUtils {
   public static List<BackupInfo> getHistory(Configuration conf, int n, Path backupRootPath,
       BackupInfo.Filter... filters) throws IOException {
     List<BackupInfo> infos = getHistory(conf, backupRootPath);
-    List<BackupInfo> ret = new ArrayList<BackupInfo>();
+    List<BackupInfo> ret = new ArrayList<>();
     for (BackupInfo info : infos) {
       if (ret.size() == n) {
         break;
@@ -672,7 +668,7 @@ public final class BackupUtils {
 
     for (Entry<TableName, BackupManifest> manifestEntry : backupManifestMap.entrySet()) {
       TableName table = manifestEntry.getKey();
-      TreeSet<BackupImage> imageSet = new TreeSet<BackupImage>();
+      TreeSet<BackupImage> imageSet = new TreeSet<>();
 
       ArrayList<BackupImage> depList = manifestEntry.getValue().getDependentListByTable(table);
       if (depList != null && !depList.isEmpty()) {
@@ -697,8 +693,8 @@ public final class BackupUtils {
   public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
       throws IOException {
     FileSystem fs = FileSystem.get(conf);
-    String tmp =
-        conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+    String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+            HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
     Path path =
         new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
             + EnvironmentEdgeManager.currentTime());
@@ -736,7 +732,7 @@ public final class BackupUtils {
     // limit. Bad for snapshot restore.
     conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
     conf.set(LoadIncrementalHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
-    LoadIncrementalHFiles loader = null;
+    LoadIncrementalHFiles loader;
     try {
       loader = new LoadIncrementalHFiles(conf);
     } catch (Exception e) {

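Most of the churn in BackupUtils.java is mechanical: explicit type arguments on the right-hand side of generic instantiations are replaced with the Java 7 diamond operator, and the anonymous PathFilter is collapsed into a lambda. A minimal sketch of both patterns follows; the class, variable, and method names here are illustrative, not taken from the patch.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Predicate;

    public class DiamondSketch {
      public static void main(String[] args) {
        // Before: type arguments repeated on both sides.
        Map<String, Long> verbose = new HashMap<String, Long>();
        // After: the compiler infers the arguments from the declared type.
        Map<String, Long> inferred = new HashMap<>();

        // Before: anonymous class implementing a single-method interface.
        Predicate<String> anon = new Predicate<String>() {
          @Override
          public boolean test(String s) {
            return !s.isEmpty();
          }
        };
        // After: the equivalent lambda, as done for PathFilter above.
        Predicate<String> lambda = s -> !s.isEmpty();

        List<String> names = new ArrayList<>();
        names.add("wal-1");
        System.out.println(inferred.isEmpty() + " " + anon.test("") + " "
            + lambda.test(names.get(0)));
      }
    }

Both forms compile to identical bytecode; the fix is purely about removing redundancy that Checkstyle flags.
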
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 7f274db..1015665 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -36,9 +36,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.RestoreJob;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
@@ -46,26 +43,29 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 
 /**
  * A collection for methods used by multiple classes to restore HBase tables.
  */
 @InterfaceAudience.Private
 public class RestoreTool {
-
   public static final Logger LOG = LoggerFactory.getLogger(BackupUtils.class);
   private final static long TABLE_AVAILABILITY_WAIT_TIME = 180000;
 
   private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
-  protected Configuration conf = null;
+  protected Configuration conf;
   protected Path backupRootPath;
   protected String backupId;
   protected FileSystem fs;
@@ -89,7 +89,6 @@ public class RestoreTool {
    * @throws IOException exception
    */
   Path getTableArchivePath(TableName tableName) throws IOException {
-
     Path baseDir =
         new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
             HConstants.HFILE_ARCHIVE_DIRECTORY);
@@ -107,12 +106,11 @@ public class RestoreTool {
    * Gets region list
    * @param tableName table name
    * @return RegionList region list
-   * @throws FileNotFoundException exception
    * @throws IOException exception
    */
-  ArrayList<Path> getRegionList(TableName tableName) throws FileNotFoundException, IOException {
+  ArrayList<Path> getRegionList(TableName tableName) throws IOException {
     Path tableArchivePath = getTableArchivePath(tableName);
-    ArrayList<Path> regionDirList = new ArrayList<Path>();
+    ArrayList<Path> regionDirList = new ArrayList<>();
     FileStatus[] children = fs.listStatus(tableArchivePath);
     for (FileStatus childStatus : children) {
       // here child refer to each region(Name)
@@ -122,9 +120,7 @@ public class RestoreTool {
     return regionDirList;
   }
 
-
   void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
-
     try (Admin admin = conn.getAdmin()) {
       admin.modifyTable(desc);
       int attempt = 0;
@@ -155,7 +151,6 @@ public class RestoreTool {
    */
   public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
       TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
-
     try (Admin admin = conn.getAdmin()) {
       if (tableNames.length != newTableNames.length) {
         throw new IOException("Number of source tables and target tables does not match!");
@@ -228,15 +223,15 @@ public class RestoreTool {
 
   /**
    * Returns value represent path for:
-   * ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/snapshot_1396650097621_namespace_table"
+   * ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/
+   *    snapshot_1396650097621_namespace_table"
    * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo,
    * .data.manifest (trunk)
    * @param tableName table name
    * @return path to table info
-   * @throws FileNotFoundException exception
    * @throws IOException exception
    */
-  Path getTableInfoPath(TableName tableName) throws FileNotFoundException, IOException {
+  Path getTableInfoPath(TableName tableName) throws IOException {
     Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
     Path tableInfoPath = null;
 
@@ -257,15 +252,16 @@ public class RestoreTool {
    * @param tableName is the table backed up
    * @return {@link TableDescriptor} saved in backup image of the table
    */
-  TableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
+  TableDescriptor getTableDesc(TableName tableName) throws IOException {
     Path tableInfoPath = this.getTableInfoPath(tableName);
     SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
     SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
     TableDescriptor tableDescriptor = manifest.getTableDescriptor();
     if (!tableDescriptor.getTableName().equals(tableName)) {
       LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
-          + tableInfoPath.toString());
-      LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
+              + tableInfoPath.toString());
+      LOG.error("tableDescriptor.getNameAsString() = "
+              + tableDescriptor.getTableName().getNameAsString());
       throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
           + " under tableInfoPath: " + tableInfoPath.toString());
     }
@@ -367,11 +363,10 @@ public class RestoreTool {
    * Gets region list
    * @param tableArchivePath table archive path
    * @return RegionList region list
-   * @throws FileNotFoundException exception
    * @throws IOException exception
    */
-  ArrayList<Path> getRegionList(Path tableArchivePath) throws FileNotFoundException, IOException {
-    ArrayList<Path> regionDirList = new ArrayList<Path>();
+  ArrayList<Path> getRegionList(Path tableArchivePath) throws IOException {
+    ArrayList<Path> regionDirList = new ArrayList<>();
     FileStatus[] children = fs.listStatus(tableArchivePath);
     for (FileStatus childStatus : children) {
       // here child refer to each region(Name)
@@ -386,9 +381,8 @@ public class RestoreTool {
    * @param regionDirList region dir list
    * @return a set of keys to store the boundaries
    */
-  byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList) throws FileNotFoundException,
-      IOException {
-    TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+  byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList) throws IOException {
+    TreeMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     // Build a set of keys to store the boundaries
     // calculate region boundaries and add all the column families to the table descriptor
     for (Path regionDir : regionDirList) {
@@ -490,7 +484,7 @@ public class RestoreTool {
       }
       if (createNew) {
         LOG.info("Creating target table '" + targetTableName + "'");
-        byte[][] keys = null;
+        byte[][] keys;
         if (regionDirList == null || regionDirList.size() == 0) {
           admin.createTable(htd, null);
         } else {
@@ -514,5 +508,4 @@ public class RestoreTool {
       }
     }
   }
-
 }

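Several signatures in RestoreTool.java drop FileNotFoundException from their throws clauses. Since FileNotFoundException extends IOException, declaring both is redundant, and callers that catch IOException already cover the narrower type. A small self-contained sketch of the principle, with hypothetical method names:

    import java.io.FileNotFoundException;
    import java.io.IOException;

    public class ThrowsSketch {
      // Redundant: FileNotFoundException is a subclass of IOException.
      static void before() throws FileNotFoundException, IOException {
        throw new FileNotFoundException("missing");
      }

      // Sufficient: the narrower exception still propagates.
      static void after() throws IOException {
        throw new FileNotFoundException("missing");
      }

      public static void main(String[] args) {
        try {
          after();
        } catch (IOException e) {
          // A FileNotFoundException lands here as well.
          System.out.println("caught: " + e.getMessage());
        }
      }
    }
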
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 2be7784..4243f5b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -75,7 +75,6 @@ import org.slf4j.LoggerFactory;
  * tests should have their own classes and extend this one
  */
 public class TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupBase.class);
 
   protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -107,10 +106,7 @@ public class TestBackupBase {
   protected static boolean setupIsDone = false;
   protected static boolean useSecondCluster = false;
 
-
-  static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient
-  {
-
+  static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
     public IncrementalTableBackupClientForTest() {
     }
 
@@ -120,8 +116,7 @@ public class TestBackupBase {
     }
 
     @Override
-    public void execute() throws IOException
-    {
+    public void execute() throws IOException {
       // case INCREMENTAL_COPY:
       try {
         // case PREPARE_INCREMENTAL:
@@ -174,14 +169,10 @@ public class TestBackupBase {
           BackupType.INCREMENTAL, conf);
         throw new IOException(e);
       }
-
     }
   }
 
-  static class FullTableBackupClientForTest extends FullTableBackupClient
-  {
-
-
+  static class FullTableBackupClientForTest extends FullTableBackupClient {
     public FullTableBackupClientForTest() {
     }
 
@@ -191,21 +182,20 @@ public class TestBackupBase {
     }
 
     @Override
-    public void execute() throws IOException
-    {
+    public void execute() throws IOException {
       // Get the stage ID to fail on
       try (Admin admin = conn.getAdmin()) {
         // Begin BACKUP
         beginBackup(backupManager, backupInfo);
         failStageIf(Stage.stage_0);
-        String savedStartCode = null;
-        boolean firstBackup = false;
+        String savedStartCode;
+        boolean firstBackup;
         // do snapshot for full table backup
         savedStartCode = backupManager.readBackupStartCode();
         firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
         if (firstBackup) {
-          // This is our first backup. Let's put some marker to system table so that we can hold the logs
-          // while we do the backup.
+          // This is our first backup. Let's put some marker to system table so that we can hold the
+          // logs while we do the backup.
           backupManager.writeBackupStartCode(0L);
         }
         failStageIf(Stage.stage_1);
@@ -216,7 +206,7 @@ public class TestBackupBase {
         // the snapshot.
         LOG.info("Execute roll log procedure for full backup ...");
 
-        Map<String, String> props = new HashMap<String, String>();
+        Map<String, String> props = new HashMap<>();
         props.put("backupRoot", backupInfo.getBackupRootDir());
         admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
           LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
@@ -277,12 +267,10 @@ public class TestBackupBase {
         throw new IOException(e);
       }
     }
-
   }
 
-
   /**
-   * @throws java.lang.Exception
+   * @throws Exception if starting the mini cluster or setting up the tables fails
    */
   @Before
   public void setUp() throws Exception {
@@ -315,12 +303,12 @@ public class TestBackupBase {
 
     TEST_UTIL.startMiniMapReduceCluster();
     BACKUP_ROOT_DIR =
-        new Path ( new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")),
+        new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")),
           BACKUP_ROOT_DIR).toString();
     LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
     if (useSecondCluster) {
       BACKUP_REMOTE_ROOT_DIR =
-          new Path ( new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS"))
+          new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS"))
           + BACKUP_REMOTE_ROOT_DIR).toString();
       LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
     }
@@ -338,7 +326,7 @@ public class TestBackupBase {
   }
 
   /**
-   * @throws java.lang.Exception
+   * @throws Exception if deleting the archive directory or shutting down the mini cluster fails
    */
   @AfterClass
   public static void tearDown() throws Exception {
@@ -366,7 +354,6 @@ public class TestBackupBase {
     return t;
   }
 
-
   protected BackupRequest createBackupRequest(BackupType type,
       List<TableName> tables, String path) {
     BackupRequest.Builder builder = new BackupRequest.Builder();
@@ -406,7 +393,6 @@ public class TestBackupBase {
   }
 
   protected static void loadTable(Table table) throws Exception {
-
     Put p; // 100 + 1 row to t1_syncup
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       p = new Put(Bytes.toBytes("row" + i));
@@ -417,7 +403,6 @@ public class TestBackupBase {
   }
 
   protected static void createTables() throws Exception {
-
     long tid = System.currentTimeMillis();
     table1 = TableName.valueOf("ns1:test-" + tid);
     HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
@@ -461,13 +446,21 @@ public class TestBackupBase {
 
   protected boolean checkSucceeded(String backupId) throws IOException {
     BackupInfo status = getBackupInfo(backupId);
-    if (status == null) return false;
+
+    if (status == null) {
+      return false;
+    }
+
     return status.getState() == BackupState.COMPLETE;
   }
 
   protected boolean checkFailed(String backupId) throws IOException {
     BackupInfo status = getBackupInfo(backupId);
-    if (status == null) return false;
+
+    if (status == null) {
+      return false;
+    }
+
     return status.getState() == BackupState.FAILED;
   }
 
@@ -500,6 +493,5 @@ public class TestBackupBase {
     while (it.hasNext()) {
       LOG.debug(Objects.toString(it.next().getPath()));
     }
-
   }
 }

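A recurring fix in the test base is Checkstyle's NeedBraces rule: single-statement if bodies written on one line gain explicit braces, as in checkSucceeded and checkFailed above. The transformation in isolation (hypothetical names):

    public class BracesSketch {
      static boolean checkSucceeded(Object status) {
        // Before (rejected by the NeedBraces check):
        //   if (status == null) return false;
        // After: every if body is braced, even one-liners.
        if (status == null) {
          return false;
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(checkSucceeded(null) + " " + checkSucceeded("ok"));
      }
    }
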
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
index 6d4fc47..a240bd8 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -31,16 +31,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestBackupBoundaryTests extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupBoundaryTests.class);
 
   /**
    * Verify that full backup is created on a single empty table correctly.
-   * @throws Exception
+   *
+   * @throws Exception if doing the full backup fails
    */
   @Test
   public void testFullBackupSingleEmpty() throws Exception {
-
     LOG.info("create full backup image on single table");
     List<TableName> tables = Lists.newArrayList(table3);
     LOG.info("Finished Backup " + fullTableBackup(tables));
@@ -48,7 +47,8 @@ public class TestBackupBoundaryTests extends TestBackupBase {
 
   /**
    * Verify that full backup is created on multiple empty tables correctly.
-   * @throws Exception
+   *
+   * @throws Exception if doing the full backup fails
    */
   @Test
   public void testFullBackupMultipleEmpty() throws Exception {
@@ -60,11 +60,11 @@ public class TestBackupBoundaryTests extends TestBackupBase {
 
   /**
    * Verify that full backup fails on a single table that does not exist.
-   * @throws Exception
+   *
+   * @throws Exception if doing the full backup fails
    */
   @Test(expected = IOException.class)
   public void testFullBackupSingleDNE() throws Exception {
-
     LOG.info("test full backup fails on a single table that does not exist");
     List<TableName> tables = toList("tabledne");
     fullTableBackup(tables);
@@ -72,11 +72,11 @@ public class TestBackupBoundaryTests extends TestBackupBase {
 
   /**
    * Verify that full backup fails on multiple tables that do not exist.
-   * @throws Exception
+   *
+   * @throws Exception if doing the full backup fails
    */
   @Test(expected = IOException.class)
   public void testFullBackupMultipleDNE() throws Exception {
-
     LOG.info("test full backup fails on multiple tables that do not exist");
     List<TableName> tables = toList("table1dne", "table2dne");
     fullTableBackup(tables);
@@ -84,7 +84,8 @@ public class TestBackupBoundaryTests extends TestBackupBase {
 
   /**
    * Verify that full backup fails on tableset containing real and fake tables.
-   * @throws Exception
+   *
+   * @throws Exception if doing the full backup fails
    */
   @Test(expected = IOException.class)
   public void testFullBackupMixExistAndDNE() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
index 0dd3de9..517d516 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -38,13 +38,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestBackupDelete extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupDelete.class);
 
   /**
    * Verify that full backup is created on a single table with data correctly. Verify that history
-   * works as expected
-   * @throws Exception
+   * works as expected.
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testBackupDelete() throws Exception {
@@ -70,8 +70,9 @@ public class TestBackupDelete extends TestBackupBase {
 
   /**
    * Verify that full backup is created on a single table with data correctly. Verify that history
-   * works as expected
-   * @throws Exception
+   * works as expected.
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testBackupDeleteCommand() throws Exception {
@@ -97,5 +98,4 @@ public class TestBackupDelete extends TestBackupBase {
     LOG.info(baos.toString());
     assertTrue(output.indexOf("Deleted 1 backups") >= 0);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index 114480e..86cd276 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -30,17 +30,16 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(MediumTests.class)
 public class TestBackupDeleteRestore extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupDeleteRestore.class);
 
   /**
    * Verify that load data- backup - delete some data - restore works as expected - deleted data get
    * restored.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testBackupDeleteRestore() throws Exception {
-
     LOG.info("test full restore on a single table empty table");
 
     List<TableName> tables = Lists.newArrayList(table1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
index 9447f28..66cb762 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
@@ -56,12 +56,9 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  */
 @Category(LargeTests.class)
 public class TestBackupDeleteWithFailures extends TestBackupBase{
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupDeleteWithFailures.class);
 
-
-
-  public static enum Failure {
+  public enum Failure {
     NO_FAILURES,
     PRE_SNAPSHOT_FAILURE,
     PRE_DELETE_SNAPSHOT_FAILURE,
@@ -69,7 +66,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
   }
 
   public static class MasterSnapshotObserver implements MasterCoprocessor, MasterObserver {
-    List<Failure> failures = new ArrayList<Failure>();
+    List<Failure> failures = new ArrayList<>();
 
     public void setFailures(Failure ... f) {
       failures.clear();
@@ -86,18 +83,17 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     @Override
     public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
         final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor)
-        throws IOException
-    {
-       if (failures.contains(Failure.PRE_SNAPSHOT_FAILURE)) {
-         throw new IOException ("preSnapshot");
-       }
+        throws IOException {
+      if (failures.contains(Failure.PRE_SNAPSHOT_FAILURE)) {
+        throw new IOException("preSnapshot");
+      }
     }
 
     @Override
     public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
         SnapshotDescription snapshot) throws IOException {
       if (failures.contains(Failure.PRE_DELETE_SNAPSHOT_FAILURE)) {
-        throw new IOException ("preDeleteSnapshot");
+        throw new IOException("preDeleteSnapshot");
       }
     }
 
@@ -105,14 +101,13 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     public void postDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
         SnapshotDescription snapshot) throws IOException {
       if (failures.contains(Failure.POST_DELETE_SNAPSHOT_FAILURE)) {
-        throw new IOException ("postDeleteSnapshot");
+        throw new IOException("postDeleteSnapshot");
       }
     }
-
   }
 
   /**
-   * @throws java.lang.Exception
+   * @throws Exception if starting the mini cluster or setting up the tables fails
    */
   @Override
   @Before
@@ -123,21 +118,20 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     super.setUp();
   }
 
-
   private MasterSnapshotObserver getMasterSnapshotObserver() {
     return TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost()
         .findCoprocessor(MasterSnapshotObserver.class);
   }
 
   @Test
-  public void testBackupDeleteWithFailures() throws Exception
-  {
-     testBackupDeleteWithFailuresAfter(1, Failure.PRE_DELETE_SNAPSHOT_FAILURE);
-     testBackupDeleteWithFailuresAfter(0, Failure.POST_DELETE_SNAPSHOT_FAILURE);
-     testBackupDeleteWithFailuresAfter(1, Failure.PRE_SNAPSHOT_FAILURE);
+  public void testBackupDeleteWithFailures() throws Exception {
+    testBackupDeleteWithFailuresAfter(1, Failure.PRE_DELETE_SNAPSHOT_FAILURE);
+    testBackupDeleteWithFailuresAfter(0, Failure.POST_DELETE_SNAPSHOT_FAILURE);
+    testBackupDeleteWithFailuresAfter(1, Failure.PRE_SNAPSHOT_FAILURE);
   }
 
-  private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures) throws Exception {
+  private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures)
+          throws Exception {
     LOG.info("test repair backup delete on a single table with data and failures "+ failures[0]);
     List<TableName> tableList = Lists.newArrayList(table1);
     String backupId = fullTableBackup(tableList);
@@ -158,11 +152,13 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     try {
       getBackupAdmin().deleteBackups(backupIds);
     } catch(IOException e) {
-      if(expected != 1) assertTrue(false);
+      if(expected != 1) {
+        assertTrue(false);
+      }
     }
 
     // Verify that history length == expected after delete failure
-    assertTrue (table.getBackupHistory().size() == expected);
+    assertTrue(table.getBackupHistory().size() == expected);
 
     String[] ids = table.getListOfBackupIdsFromDeleteOperation();
 
@@ -183,7 +179,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     int ret = ToolRunner.run(conf1, new BackupDriver(), args);
     assertTrue(ret == 0);
     // Verify that history length == 0
-    assertTrue (table.getBackupHistory().size() == 0);
+    assertTrue(table.getBackupHistory().size() == 0);
     ids = table.getListOfBackupIdsFromDeleteOperation();
 
     // Verify that we do not have delete record in backup system table
@@ -192,7 +188,4 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     table.close();
     admin.close();
   }
-
-
-
 }

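The Failure enum above loses its static keyword. A nested enum is implicitly static under the Java Language Specification, so the explicit modifier is redundant and Checkstyle's RedundantModifier rule flags it. A minimal sketch:

    public class EnumSketch {
      // "public static enum" is flagged: nested enums are implicitly static.
      public enum Failure {
        NO_FAILURES,
        PRE_SNAPSHOT_FAILURE
      }

      public static void main(String[] args) {
        // The enum is usable without an instance of the outer class,
        // exactly as if "static" had been written out.
        System.out.println(Failure.PRE_SNAPSHOT_FAILURE);
      }
    }
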
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
index 2f111e2..8e19076 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -39,16 +39,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestBackupDescribe extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupDescribe.class);
 
   /**
-   * Verify that describe works as expected if incorrect backup Id is supplied
-   * @throws Exception
+   * Verify that describe works as expected if incorrect backup Id is supplied.
+   *
+   * @throws Exception if creating the {@link BackupDriver} fails
    */
   @Test
   public void testBackupDescribe() throws Exception {
-
     LOG.info("test backup describe on a single table with data");
 
     String[] args = new String[] { "describe", "backup_2" };
@@ -75,7 +74,6 @@ public class TestBackupDescribe extends TestBackupBase {
 
   @Test
   public void testBackupDescribeCommand() throws Exception {
-
     LOG.info("test backup describe on a single table with data: command-line");
 
     List<TableName> tableList = Lists.newArrayList(table1);
@@ -103,7 +101,5 @@ public class TestBackupDescribe extends TestBackupBase {
     String desc = status.getShortDescription();
     table.close();
     assertTrue(response.indexOf(desc) >= 0);
-
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
index c2c3e59..0a54a2d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
@@ -57,7 +57,7 @@ public class TestBackupHFileCleaner {
   Path root;
 
   /**
-   * @throws java.lang.Exception
+   * @throws Exception if starting the mini cluster or getting the filesystem fails
    */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -68,7 +68,7 @@ public class TestBackupHFileCleaner {
   }
 
   /**
-   * @throws java.lang.Exception
+   * @throws Exception if closing the filesystem or shutting down the mini cluster fails
    */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
@@ -110,7 +110,9 @@ public class TestBackupHFileCleaner {
     deletable = cleaner.getDeletableFiles(stats);
     boolean found = false;
     for (FileStatus stat1 : deletable) {
-      if (stat.equals(stat1)) found = true;
+      if (stat.equals(stat1)) {
+        found = true;
+      }
     }
     assertTrue("Cleaner should allow to delete this file as there is no hfile reference "
         + "for it.", found);
@@ -133,7 +135,9 @@ public class TestBackupHFileCleaner {
     deletable = cleaner.getDeletableFiles(stats);
     found = false;
     for (FileStatus stat1 : deletable) {
-      if (stat.equals(stat1)) found = true;
+      if (stat.equals(stat1)) {
+        found = true;
+      }
     }
     assertFalse("Cleaner should not allow to delete this file as there is a hfile reference "
         + "for it.", found);

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
index 76838f9..16c79fd 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -37,7 +37,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestBackupShowHistory extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupShowHistory.class);
 
   private boolean findBackup(List<BackupInfo> history, String backupId) {
@@ -54,8 +53,9 @@ public class TestBackupShowHistory extends TestBackupBase {
 
   /**
    * Verify that full backup is created on a single table with data correctly. Verify that history
-   * works as expected
-   * @throws Exception
+   * works as expected.
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testBackupHistory() throws Exception {
@@ -69,12 +69,7 @@ public class TestBackupShowHistory extends TestBackupBase {
 
     List<BackupInfo> history = getBackupAdmin().getHistory(10);
     assertTrue(findBackup(history, backupId));
-    BackupInfo.Filter nullFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo info) {
-        return true;
-      }
-    };
+    BackupInfo.Filter nullFilter = info -> true;
     history = BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), nullFilter);
     assertTrue(findBackup(history, backupId));
 
@@ -95,20 +90,17 @@ public class TestBackupShowHistory extends TestBackupBase {
     String backupId2 = fullTableBackup(tableList);
     assertTrue(checkSucceeded(backupId2));
     LOG.info("backup complete: " + table2);
-    BackupInfo.Filter tableNameFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo image) {
-        if (table1 == null) return true;
-        List<TableName> names = image.getTableNames();
-        return names.contains(table1);
+    BackupInfo.Filter tableNameFilter = image -> {
+      if (table1 == null) {
+        return true;
       }
+
+      List<TableName> names = image.getTableNames();
+      return names.contains(table1);
     };
-    BackupInfo.Filter tableSetFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo info) {
-        String backupId = info.getBackupId();
-        return backupId.startsWith("backup");
-      }
+    BackupInfo.Filter tableSetFilter = info -> {
+      String backupId1 = info.getBackupId();
+      return backupId1.startsWith("backup");
     };
 
     history = getBackupAdmin().getHistory(10, tableNameFilter, tableSetFilter);
@@ -143,5 +135,4 @@ public class TestBackupShowHistory extends TestBackupBase {
     assertTrue(ret == 0);
     LOG.info("show_history");
   }
-
 }

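The filter changes above collapse anonymous BackupInfo.Filter instances into lambdas, which works because the interface exposes a single abstract method. A self-contained sketch with a stand-in interface (BackupInfo.Filter itself is not reproduced here):

    public class FilterSketch {
      // Stand-in for a single-abstract-method interface like BackupInfo.Filter.
      interface Filter {
        boolean apply(String info);
      }

      public static void main(String[] args) {
        // Before: anonymous class.
        Filter anon = new Filter() {
          @Override
          public boolean apply(String info) {
            return info.startsWith("backup");
          }
        };
        // After: equivalent lambda. Note that a lambda parameter may not
        // shadow an enclosing local, which is why the patch renames
        // backupId to backupId1 inside tableSetFilter.
        Filter lambda = info -> info.startsWith("backup");

        System.out.println(anon.apply("backup_1") + " " + lambda.apply("other"));
      }
    }
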
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
index ac0dc61..f9793c9 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
@@ -36,16 +36,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestBackupStatusProgress extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestBackupStatusProgress.class);
 
   /**
    * Verify that full backup is created on a single table with data correctly.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testBackupStatusProgress() throws Exception {
-
     LOG.info("test backup status/progress on a single table with data");
 
     List<TableName> tableList = Lists.newArrayList(table1);
@@ -63,7 +62,6 @@ public class TestBackupStatusProgress extends TestBackupBase {
 
   @Test
   public void testBackupStatusProgressCommand() throws Exception {
-
     LOG.info("test backup status/progress on a single table with data: command-line");
 
     List<TableName> tableList = Lists.newArrayList(table1);
@@ -90,6 +88,5 @@ public class TestBackupStatusProgress extends TestBackupBase {
     assertTrue(responce.indexOf(backupId) >= 0);
     assertTrue(responce.indexOf("progress") > 0);
     assertTrue(responce.indexOf("100") > 0);
-
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
index f5ee268..5b84c90 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -60,7 +60,6 @@ import org.junit.experimental.categories.Category;
  */
 @Category(MediumTests.class)
 public class TestBackupSystemTable {
-
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   protected static Configuration conf = UTIL.getConfiguration();
   protected static MiniHBaseCluster cluster;
@@ -152,9 +151,7 @@ public class TestBackupSystemTable {
 
   @Test
   public void testBackupDelete() throws IOException {
-
     try (BackupSystemTable table = new BackupSystemTable(conn)) {
-
       int n = 10;
       List<BackupInfo> list = createBackupInfoList(n);
 
@@ -259,7 +256,7 @@ public class TestBackupSystemTable {
     tables.add(TableName.valueOf("t2"));
     tables.add(TableName.valueOf("t3"));
 
-    HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
+    HashMap<String, Long> rsTimestampMap = new HashMap<>();
 
     rsTimestampMap.put("rs1:100", 100L);
     rsTimestampMap.put("rs2:100", 101L);
@@ -285,7 +282,7 @@ public class TestBackupSystemTable {
     tables1.add(TableName.valueOf("t4"));
     tables1.add(TableName.valueOf("t5"));
 
-    HashMap<String, Long> rsTimestampMap1 = new HashMap<String, Long>();
+    HashMap<String, Long> rsTimestampMap1 = new HashMap<>();
 
     rsTimestampMap1.put("rs1:100", 200L);
     rsTimestampMap1.put("rs2:100", 201L);
@@ -460,7 +457,7 @@ public class TestBackupSystemTable {
       String[] removeTables = new String[] { "table4", "table3" };
       table.removeFromBackupSet(setName, removeTables);
 
-     Set<String> expectedTables = new HashSet<>(Arrays.asList("table1", "table2"));
+      Set<String> expectedTables = new HashSet<>(Arrays.asList("table1", "table2"));
 
       List<TableName> tnames = table.describeBackupSet(setName);
       assertTrue(tnames != null);
@@ -514,7 +511,6 @@ public class TestBackupSystemTable {
   }
 
   private BackupInfo createBackupInfo() {
-
     BackupInfo ctxt =
         new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] {
             TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") },
@@ -525,7 +521,7 @@ public class TestBackupSystemTable {
   }
 
   private List<BackupInfo> createBackupInfoList(int size) {
-    List<BackupInfo> list = new ArrayList<BackupInfo>();
+    List<BackupInfo> list = new ArrayList<>();
     for (int i = 0; i < size; i++) {
       list.add(createBackupInfo());
       try {
@@ -539,6 +535,8 @@ public class TestBackupSystemTable {
 
   @AfterClass
   public static void tearDown() throws IOException {
-    if (cluster != null) cluster.shutdown();
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
index 7705b1d..41d0c98 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
@@ -36,16 +36,15 @@ import org.slf4j.LoggerFactory;
 
 @Category(LargeTests.class)
 public class TestFullBackupSet extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestFullBackupSet.class);
 
   /**
    * Verify that full backup is created on a single table with data correctly.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testFullBackupSetExist() throws Exception {
-
     LOG.info("Test full backup, backup set exists");
 
     // Create set
@@ -83,21 +82,16 @@ public class TestFullBackupSet extends TestBackupBase {
       TEST_UTIL.deleteTable(table1_restore);
       LOG.info("restore into other table is complete");
       hba.close();
-
     }
-
   }
 
   @Test
   public void testFullBackupSetDoesNotExist() throws Exception {
-
     LOG.info("test full backup, backup set does not exist");
     String name = "name1";
     String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
     // Run backup
     int ret = ToolRunner.run(conf1, new BackupDriver(), args);
     assertTrue(ret != 0);
-
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 0e5ab33..1821a3e 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -31,16 +31,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestFullRestore extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestFullRestore.class);
 
   /**
-   * Verify that a single table is restored to a new table
-   * @throws Exception
+   * Verify that a single table is restored to a new table.
+   *
+   * @throws Exception if doing the backup, restoring it or an operation on the tables fails
    */
   @Test
   public void testFullRestoreSingle() throws Exception {
-
     LOG.info("test full restore on a single table empty table");
 
     List<TableName> tables = Lists.newArrayList(table1);
@@ -60,11 +59,8 @@ public class TestFullRestore extends TestBackupBase {
     hba.close();
   }
 
-
-
   @Test
   public void testFullRestoreSingleCommand() throws Exception {
-
     LOG.info("test full restore on a single table empty table: command-line");
 
     List<TableName> tables = Lists.newArrayList(table1);
@@ -87,7 +83,6 @@ public class TestFullRestore extends TestBackupBase {
 
   @Test
   public void testFullRestoreCheckCommand() throws Exception {
-
     LOG.info("test full restore on a single table: command-line, check only");
 
     List<TableName> tables = Lists.newArrayList(table1);
@@ -108,7 +103,8 @@ public class TestFullRestore extends TestBackupBase {
 
   /**
    * Verify that multiple tables are restored to new tables.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup, restoring it or an operation on the tables fails
    */
   @Test
   public void testFullRestoreMultiple() throws Exception {
@@ -132,7 +128,8 @@ public class TestFullRestore extends TestBackupBase {
 
   /**
    * Verify that multiple tables are restored to new tables.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup, restoring it or an operation on the tables fails
    */
   @Test
   public void testFullRestoreMultipleCommand() throws Exception {
@@ -161,12 +158,12 @@ public class TestFullRestore extends TestBackupBase {
   }
 
   /**
-   * Verify that a single table is restored using overwrite
-   * @throws Exception
+   * Verify that a single table is restored using overwrite.
+   *
+   * @throws Exception if doing the backup or restoring it fails
    */
   @Test
   public void testFullRestoreSingleOverwrite() throws Exception {
-
     LOG.info("test full restore on a single table empty table");
     List<TableName> tables = Lists.newArrayList(table1);
     String backupId = fullTableBackup(tables);
@@ -181,12 +178,12 @@ public class TestFullRestore extends TestBackupBase {
   }
 
   /**
-   * Verify that a single table is restored using overwrite
-   * @throws Exception
+   * Verify that a single table is restored using overwrite.
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testFullRestoreSingleOverwriteCommand() throws Exception {
-
     LOG.info("test full restore on a single table empty table: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
     String backupId = fullTableBackup(tables);
@@ -203,12 +200,12 @@ public class TestFullRestore extends TestBackupBase {
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1));
     hba.close();
-
   }
 
   /**
    * Verify that multiple tables are restored to new tables using overwrite.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or restoring it fails
    */
   @Test
   public void testFullRestoreMultipleOverwrite() throws Exception {
@@ -226,7 +223,8 @@ public class TestFullRestore extends TestBackupBase {
 
   /**
    * Verify that multiple tables are restored to new tables using overwrite.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testFullRestoreMultipleOverwriteCommand() throws Exception {
@@ -253,11 +251,11 @@ public class TestFullRestore extends TestBackupBase {
 
   /**
    * Verify that restore fails on a single table that does not exist.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or restoring it fails
    */
   @Test(expected = IOException.class)
   public void testFullRestoreSingleDNE() throws Exception {
-
     LOG.info("test restore fails on a single table that does not exist");
     List<TableName> tables = Lists.newArrayList(table1);
     String backupId = fullTableBackup(tables);
@@ -274,11 +272,11 @@ public class TestFullRestore extends TestBackupBase {
 
   /**
    * Verify that restore fails on a single table that does not exist.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or restoring it fails
    */
   @Test
   public void testFullRestoreSingleDNECommand() throws Exception {
-
     LOG.info("test restore fails on a single table that does not exist: command-line");
     List<TableName> tables = Lists.newArrayList(table1);
     String backupId = fullTableBackup(tables);
@@ -294,16 +292,15 @@ public class TestFullRestore extends TestBackupBase {
     // Run restore
     int ret = ToolRunner.run(conf1, new RestoreDriver(), args);
     assertTrue(ret != 0);
-
   }
 
   /**
    * Verify that restore fails on multiple tables that do not exist.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or restoring it fails
    */
   @Test(expected = IOException.class)
   public void testFullRestoreMultipleDNE() throws Exception {
-
     LOG.info("test restore fails on multiple tables that do not exist");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);
@@ -320,11 +317,11 @@ public class TestFullRestore extends TestBackupBase {
 
   /**
    * Verify that restore fails on multiple tables that do not exist.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or restoring it fails
    */
   @Test
   public void testFullRestoreMultipleDNECommand() throws Exception {
-
     LOG.info("test restore fails on multiple tables that do not exist: command-line");
 
     List<TableName> tables = Lists.newArrayList(table2, table3);

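The Javadoc hunks above all follow one pattern: each bare @throws tag gains a description and stray blank lines inside method bodies are dropped, presumably to satisfy a rule such as Checkstyle's NonEmptyAtclauseDescription. A minimal sketch of the resulting test shape, with a hypothetical class and method name:

    import org.junit.Test;

    public class JavadocStyleExample {
      /**
       * Verify that a hypothetical operation succeeds.
       *
       * @throws Exception if setting up or running the operation fails
       */
      @Test
      public void testHypotheticalOperation() throws Exception {
        // test body would go here
      }
    }
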
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 0ec78be..55c14ac 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -55,13 +55,13 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestIncrementalBackupMergeWithFailures.class);
 
-  static enum FailurePhase {
+  enum FailurePhase {
     PHASE1, PHASE2, PHASE3, PHASE4
   }
+
   public final static String FAILURE_PHASE_KEY = "failurePhase";
 
   static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
-
     FailurePhase failurePhase;
 
     @Override
@@ -75,7 +75,6 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
       }
     }
 
-
     /**
      * This is an exact copy of the parent's run() with injections
      * of different types of failures.
@@ -95,14 +94,13 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
         LOG.debug("Merge backup images " + bids);
       }
 
-      List<Pair<TableName, Path>> processedTableList = new ArrayList<Pair<TableName, Path>>();
+      List<Pair<TableName, Path>> processedTableList = new ArrayList<>();
       boolean finishedTables = false;
       Connection conn = ConnectionFactory.createConnection(getConf());
       BackupSystemTable table = new BackupSystemTable(conn);
       FileSystem fs = FileSystem.get(getConf());
 
       try {
-
         // Start backup exclusive operation
         table.startBackupExclusiveOperation();
         // Start merge operation
@@ -112,19 +110,16 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
         String mergedBackupId = findMostRecentBackupId(backupIds);
 
         TableName[] tableNames = getTableNamesInBackupImages(backupIds);
-        String backupRoot = null;
 
         BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
-        backupRoot = bInfo.getBackupRootDir();
+        String backupRoot = bInfo.getBackupRootDir();
         // PHASE 1
         checkFailure(FailurePhase.PHASE1);
 
         for (int i = 0; i < tableNames.length; i++) {
-
           LOG.info("Merge backup images for " + tableNames[i]);
 
           // Find input directories for table
-
           Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
           String dirs = StringUtils.join(dirPaths, ",");
           Path bulkOutputPath =
@@ -140,14 +135,13 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
           conf.set(bulkOutputConfKey, bulkOutputPath.toString());
           String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
 
-          int result = 0;
           // PHASE 2
           checkFailure(FailurePhase.PHASE2);
           player.setConf(getConf());
-          result = player.run(playerArgs);
+          int result = player.run(playerArgs);
           if (succeeded(result)) {
             // Add to processed table list
-            processedTableList.add(new Pair<TableName, Path>(tableNames[i], bulkOutputPath));
+            processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
           } else {
             throw new IOException("Can not merge backup images for " + dirs
                 + " (check Hadoop/MR and HBase logs). Player return code =" + result);
@@ -193,21 +187,17 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
         table.close();
         conn.close();
       }
-
     }
 
     private void checkFailure(FailurePhase phase) throws IOException {
-      if ( failurePhase != null && failurePhase == phase) {
-        throw new IOException (phase.toString());
+      if (failurePhase != null && failurePhase == phase) {
+        throw new IOException(phase.toString());
       }
     }
-
   }
 
-
   @Test
   public void TestIncBackupMergeRestore() throws Exception {
-
     int ADD_ROWS = 99;
     // #1 - create full backup for all tables
     LOG.info("create full backup image for all tables");
@@ -219,8 +209,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
 
     Connection conn = ConnectionFactory.createConnection(conf1);
 
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
@@ -262,7 +251,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
 
     // #4 Merge backup images with failures
 
-    for ( FailurePhase phase : FailurePhase.values()) {
+    for (FailurePhase phase : FailurePhase.values()) {
       Configuration conf = conn.getConfiguration();
 
       conf.set(FAILURE_PHASE_KEY, phase.toString());
@@ -329,7 +318,5 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
 
     admin.close();
     conn.close();
-
   }
-
 }

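BackupMergeJobWithFailures above drives failure testing through a small enum: the test puts a phase name into the configuration, and the overridden run() throws at the matching checkpoint. A stripped-down sketch of the pattern, with hypothetical names and nothing beyond the JDK:

    import java.io.IOException;

    public class FailureInjectingJob {
      enum Phase { PHASE1, PHASE2 }

      // In the test above this is parsed from a configuration key ("failurePhase").
      private final Phase failurePhase;

      public FailureInjectingJob(Phase failurePhase) {
        this.failurePhase = failurePhase;
      }

      public void run() throws IOException {
        checkFailure(Phase.PHASE1); // injection point before the real work
        // ... the real merge work would go here ...
        checkFailure(Phase.PHASE2); // injection point after the first step
      }

      private void checkFailure(Phase phase) throws IOException {
        if (failurePhase == phase) {
          throw new IOException(phase.toString()); // simulate a crash at this phase
        }
      }
    }

Each iteration of the test's loop then sets one phase and runs the merge, expecting it to fail at exactly that point.
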
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index ed1d010..5c29b3d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.tool.TestLoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -47,6 +46,8 @@ import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
 /**
  * 1. Create table t1
  * 2. Load data to t1
@@ -63,13 +64,14 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
     secure = true;
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<>();
     params.add(new Object[] {Boolean.TRUE});
     return params;
   }
 
   public TestIncrementalBackupWithBulkLoad(Boolean b) {
   }
+
   // implement all test cases in 1 test since incremental backup/restore has dependencies
   @Test
   public void TestIncBackupDeleteTable() throws Exception {
@@ -78,9 +80,8 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     LOG.info("create full backup image for all tables");
 
     List<TableName> tables = Lists.newArrayList(table1);
-    HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
-    admin = (HBaseAdmin) conn.getAdmin();
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
     BackupAdminImpl client = new BackupAdminImpl(conn);
 
     BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
@@ -104,9 +105,9 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     LOG.debug("bulk loading into " + testName);
     int actual = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
         qualName, false, null, new byte[][][] {
-      new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
-      new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
-    }, true, false, true, NB_ROWS_IN_BATCH*2, NB_ROWS2);
+          new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
+          new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
+        }, true, false, true, NB_ROWS_IN_BATCH*2, NB_ROWS2);
 
     // #3 - incremental backup for table1
     tables = Lists.newArrayList(table1);
@@ -118,7 +119,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     int actual1 = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
       qualName, false, null,
       new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") },
-          new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, },
+        new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, },
       true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2);
 
     // #5 - incremental backup for table1
@@ -144,7 +145,7 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     backupIdFull = client.backupTables(request);
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair
-      = table.readBulkloadRows(tables);
+        = table.readBulkloadRows(tables);
       assertTrue("map still has " + pair.getSecond().size() + " entries",
           pair.getSecond().isEmpty());
     }
@@ -154,5 +155,4 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
     admin.close();
     conn.close();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index bb4c52b..5aa5305 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -38,18 +38,18 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestRemoteBackup extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestRemoteBackup.class);
 
   @Override
-  public void setUp () throws Exception {
+  public void setUp() throws Exception {
     useSecondCluster = true;
     super.setUp();
   }
 
   /**
    * Verify that a remote full backup of a single table with data is created correctly.
-   * @throws Exception
+   *
+   * @throws Exception if an operation on the table fails
    */
   @Test
   public void testFullBackupRemote() throws Exception {
@@ -59,28 +59,25 @@ public class TestRemoteBackup extends TestBackupBase {
     final byte[] fam3Name = Bytes.toBytes("f3");
     final byte[] fam2Name = Bytes.toBytes("f2");
     final Connection conn = ConnectionFactory.createConnection(conf1);
-    Thread t = new Thread() {
-      @Override
-      public void run() {
-        try {
-          latch.await();
-        } catch (InterruptedException ie) {
-        }
-        try {
-          HTable t1 = (HTable) conn.getTable(table1);
-          Put p1;
-          for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
-            p1 = new Put(Bytes.toBytes("row-t1" + i));
-            p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
-            t1.put(p1);
-          }
-          LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
-          t1.close();
-        } catch (IOException ioe) {
-          throw new RuntimeException(ioe);
+    Thread t = new Thread(() -> {
+      try {
+        latch.await();
+      } catch (InterruptedException ie) {
+      }
+      try {
+        HTable t1 = (HTable) conn.getTable(table1);
+        Put p1;
+        for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
+          p1 = new Put(Bytes.toBytes("row-t1" + i));
+          p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
+          t1.put(p1);
         }
+        LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
+        t1.close();
+      } catch (IOException ioe) {
+        throw new RuntimeException(ioe);
       }
-    };
+    });
     t.start();
 
     table1Desc.addFamily(new HColumnDescriptor(fam3Name));

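The rewrite above replaces an anonymous Thread subclass with a constructor taking a lambda. Runnable is a functional interface, so for this usage the two forms are interchangeable; a minimal standalone illustration:

    public class ThreadStyles {
      public static void main(String[] args) {
        // Anonymous subclass overriding run(), as the test used before the patch.
        Thread anonymous = new Thread() {
          @Override
          public void run() {
            System.out.println("worker running");
          }
        };

        // Lambda form: the body of run() becomes a Runnable passed to the constructor.
        Thread lambda = new Thread(() -> System.out.println("worker running"));

        anonymous.start();
        lambda.start();
      }
    }
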
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index 20bd88a..c7b6192 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -24,23 +24,21 @@ import org.slf4j.LoggerFactory;
 
 @Category(LargeTests.class)
 public class TestRemoteRestore extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestRemoteRestore.class);
 
   @Override
-  public void setUp () throws Exception {
+  public void setUp() throws Exception {
     useSecondCluster = true;
     super.setUp();
   }
 
-
   /**
    * Verify that a remote restore on a single table is successful.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testFullRestoreRemote() throws Exception {
-
     LOG.info("test remote full backup on a single table");
     String backupId =
         backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
@@ -55,5 +53,4 @@ public class TestRemoteRestore extends TestBackupBase {
     TEST_UTIL.deleteTable(table1_restore);
     hba.close();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
index bb0052d..5ae51fe 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
@@ -38,7 +38,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 @Category(LargeTests.class)
 public class TestRepairAfterFailedDelete extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestRepairAfterFailedDelete.class);
 
   @Test
@@ -83,10 +82,8 @@ public class TestRepairAfterFailedDelete extends TestBackupBase {
     int ret = ToolRunner.run(conf1, new BackupDriver(), args);
     assertTrue(ret == 0);
     // Verify that history length == 0
-    assertTrue (table.getBackupHistory().size() == 0);
+    assertTrue(table.getBackupHistory().size() == 0);
     table.close();
     admin.close();
   }
-
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
index eba3b37..6eccb3c 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -33,12 +33,12 @@ import org.slf4j.LoggerFactory;
 
 @Category(LargeTests.class)
 public class TestRestoreBoundaryTests extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestRestoreBoundaryTests.class);
 
   /**
-   * Verify that a single empty table is restored to a new table
-   * @throws Exception
+   * Verify that a single empty table is restored to a new table.
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testFullRestoreSingleEmpty() throws Exception {
@@ -57,7 +57,8 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
 
   /**
    * Verify that multiple tables are restored to new tables.
-   * @throws Exception
+   *
+   * @throws Exception if doing the backup or an operation on the tables fails
    */
   @Test
   public void testFullRestoreMultipleEmpty() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
index 31d11e2..6703b3d 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
@@ -28,16 +28,15 @@ import org.slf4j.LoggerFactory;
 
 @Category(LargeTests.class)
 public class TestSystemTableSnapshot extends TestBackupBase {
-
   private static final Logger LOG = LoggerFactory.getLogger(TestSystemTableSnapshot.class);
 
   /**
-   * Verify backup system table snapshot
-   * @throws Exception
+   * Verify backup system table snapshot.
+   *
+   * @throws Exception if an operation on the table fails
    */
  // @Test
   public void _testBackupRestoreSystemTable() throws Exception {
-
     LOG.info("test snapshot system table");
 
     TableName backupSystem = BackupSystemTable.getTableName(conf1);
@@ -51,5 +50,4 @@ public class TestSystemTableSnapshot extends TestBackupBase {
     hba.enableTable(backupSystem);
     hba.close();
   }
-
 }


[2/3] hbase git commit: HBASE-19765 Fixed Checkstyle errors in hbase-backup


http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index cf34d14..2d6cf26 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
  * value = backupId and full WAL file name</li>
  * </ul></p>
  */
-
 @InterfaceAudience.Private
 public final class BackupSystemTable implements Closeable {
   private static final Logger LOG = LoggerFactory.getLogger(BackupSystemTable.class);
@@ -121,7 +120,6 @@ public final class BackupSystemTable implements Closeable {
     public String toString() {
       return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
     }
-
   }
 
   /**
@@ -136,7 +134,6 @@ public final class BackupSystemTable implements Closeable {
    * from activity of RegionObserver, which controls process of a bulk loading
    * {@link org.apache.hadoop.hbase.backup.BackupObserver}
    */
-
   private TableName bulkLoadTableName;
 
   /**
@@ -212,7 +209,6 @@ public final class BackupSystemTable implements Closeable {
       }
       waitForSystemTable(admin, tableName);
       waitForSystemTable(admin, bulkLoadTableName);
-
     }
   }
 
@@ -246,7 +242,6 @@ public final class BackupSystemTable implements Closeable {
       }
     }
     LOG.debug("Backup table "+tableName+" exists and available");
-
   }
 
   @Override
@@ -260,7 +255,6 @@ public final class BackupSystemTable implements Closeable {
    * @throws IOException exception
    */
   public void updateBackupInfo(BackupInfo info) throws IOException {
-
     if (LOG.isTraceEnabled()) {
       LOG.trace("update backup status in backup system table for: " + info.getBackupId()
           + " set status=" + info.getState());
@@ -356,9 +350,7 @@ public final class BackupSystemTable implements Closeable {
    * @param backupId backup id
    * @throws IOException exception
    */
-
   public void deleteBackupInfo(String backupId) throws IOException {
-
     if (LOG.isTraceEnabled()) {
       LOG.trace("delete backup status in backup system table for " + backupId);
     }
@@ -447,7 +439,7 @@ public final class BackupSystemTable implements Closeable {
           String fam = null;
           String path = null;
           boolean raw = false;
-          byte[] row = null;
+          byte[] row;
           String region = null;
           for (Cell cell : res.listCells()) {
             row = CellUtil.cloneRow(cell);
@@ -465,19 +457,21 @@ public final class BackupSystemTable implements Closeable {
               byte[] state = CellUtil.cloneValue(cell);
               if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
                 raw = true;
-              } else raw = false;
+              } else {
+                raw = false;
+              }
             }
           }
           if (map.get(tTable) == null) {
-            map.put(tTable, new HashMap<String, Map<String, List<Pair<String, Boolean>>>>());
+            map.put(tTable, new HashMap<>());
             tblMap = map.get(tTable);
           }
           if (tblMap.get(region) == null) {
-            tblMap.put(region, new HashMap<String, List<Pair<String, Boolean>>>());
+            tblMap.put(region, new HashMap<>());
           }
           Map<String, List<Pair<String, Boolean>>> famMap = tblMap.get(region);
           if (famMap.get(fam) == null) {
-            famMap.put(fam, new ArrayList<Pair<String, Boolean>>());
+            famMap.put(fam, new ArrayList<>());
           }
           famMap.get(fam).add(new Pair<>(path, raw));
           LOG.debug("found orig " + path + " for " + fam + " of table " + region);
@@ -501,7 +495,11 @@ public final class BackupSystemTable implements Closeable {
       for (int idx = 0; idx < maps.length; idx++) {
         Map<byte[], List<Path>> map = maps[idx];
         TableName tn = sTableList.get(idx);
-        if (map == null) continue;
+
+        if (map == null) {
+          continue;
+        }
+
         for (Map.Entry<byte[], List<Path>> entry : map.entrySet()) {
           byte[] fam = entry.getKey();
           List<Path> paths = entry.getValue();
@@ -524,7 +522,6 @@ public final class BackupSystemTable implements Closeable {
    * @param backupId backup id
    * @return Current status of backup session or null
    */
-
   public BackupInfo readBackupInfo(String backupId) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("read backup status from backup system table for: " + backupId);
@@ -585,7 +582,8 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Exclusive operations are:
    * create, delete, merge
-   * @throws IOException
+   * @throws IOException if a table operation fails or an active backup exclusive operation is
+   *                     already underway
    */
   public void startBackupExclusiveOperation() throws IOException {
     LOG.debug("Start new backup exclusive operation");
@@ -642,8 +640,8 @@ public final class BackupSystemTable implements Closeable {
 
     try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
-      HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
+      Result res;
+      HashMap<String, Long> rsTimestampMap = new HashMap<>();
       while ((res = scanner.next()) != null) {
         res.advance();
         Cell cell = res.current();
@@ -690,7 +688,7 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Get all backups history
    * @return list of backup info
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistory() throws IOException {
     return getBackupHistory(false);
@@ -701,7 +699,7 @@ public final class BackupSystemTable implements Closeable {
    * @param n number of records; if n == -1, the maximum
    *        is ignored
    * @return list of records
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getHistory(int n) throws IOException {
     List<BackupInfo> history = getBackupHistory();
@@ -717,15 +715,20 @@ public final class BackupSystemTable implements Closeable {
    *        is ignored
    * @param filters list of filters
    * @return backup records
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistory(int n, BackupInfo.Filter... filters) throws IOException {
-    if (filters.length == 0) return getHistory(n);
+    if (filters.length == 0) {
+      return getHistory(n);
+    }
 
     List<BackupInfo> history = getBackupHistory();
-    List<BackupInfo> result = new ArrayList<BackupInfo>();
+    List<BackupInfo> result = new ArrayList<>();
     for (BackupInfo bi : history) {
-      if (n >= 0 && result.size() == n) break;
+      if (n >= 0 && result.size() == n) {
+        break;
+      }
+
       boolean passed = true;
       for (int i = 0; i < filters.length; i++) {
         if (!filters[i].apply(bi)) {
@@ -738,7 +741,6 @@ public final class BackupSystemTable implements Closeable {
       }
     }
     return result;
-
   }
 
   /*
@@ -761,7 +763,7 @@ public final class BackupSystemTable implements Closeable {
    * Get history for backup destination
    * @param backupRoot backup destination path
    * @return List of backup info
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistory(String backupRoot) throws IOException {
     ArrayList<BackupInfo> history = getBackupHistory(false);
@@ -778,11 +780,11 @@ public final class BackupSystemTable implements Closeable {
    * Get history for a table
    * @param name table name
    * @return history for a table
-   * @throws IOException
+   * @throws IOException if getting the backup history fails
    */
   public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOException {
     List<BackupInfo> history = getBackupHistory();
-    List<BackupInfo> tableHistory = new ArrayList<BackupInfo>();
+    List<BackupInfo> tableHistory = new ArrayList<>();
     for (BackupInfo info : history) {
       List<TableName> tables = info.getTableNames();
       if (tables.contains(name)) {
@@ -795,8 +797,7 @@ public final class BackupSystemTable implements Closeable {
   public Map<TableName, ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set,
       String backupRoot) throws IOException {
     List<BackupInfo> history = getBackupHistory(backupRoot);
-    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap =
-        new HashMap<TableName, ArrayList<BackupInfo>>();
+    Map<TableName, ArrayList<BackupInfo>> tableHistoryMap = new HashMap<>();
     for (Iterator<BackupInfo> iterator = history.iterator(); iterator.hasNext();) {
       BackupInfo info = iterator.next();
       if (!backupRoot.equals(info.getBackupRootDir())) {
@@ -807,7 +808,7 @@ public final class BackupSystemTable implements Closeable {
         if (set.contains(tableName)) {
           ArrayList<BackupInfo> list = tableHistoryMap.get(tableName);
           if (list == null) {
-            list = new ArrayList<BackupInfo>();
+            list = new ArrayList<>();
             tableHistoryMap.put(tableName, list);
           }
           list.add(info);
@@ -827,11 +828,11 @@ public final class BackupSystemTable implements Closeable {
     LOG.trace("get backup infos from backup system table");
 
     Scan scan = createScanForBackupHistory();
-    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
+    ArrayList<BackupInfo> list = new ArrayList<>();
 
     try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
+      Result res;
       while ((res = scanner.next()) != null) {
         res.advance();
         BackupInfo context = cellToBackupInfo(res.current());
@@ -859,7 +860,7 @@ public final class BackupSystemTable implements Closeable {
       LOG.trace("write RS log time stamps to backup system table for tables ["
           + StringUtils.join(tables, ",") + "]");
     }
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<>();
     for (TableName table : tables) {
       byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray();
       Put put = createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot);
@@ -885,13 +886,12 @@ public final class BackupSystemTable implements Closeable {
       LOG.trace("read RS log ts from backup system table for root=" + backupRoot);
     }
 
-    HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-        new HashMap<TableName, HashMap<String, Long>>();
+    HashMap<TableName, HashMap<String, Long>> tableTimestampMap = new HashMap<>();
 
     Scan scan = createScanForReadLogTimestampMap(backupRoot);
     try (Table table = connection.getTable(tableName);
         ResultScanner scanner = table.getScanner(scan)) {
-      Result res = null;
+      Result res;
       while ((res = scanner.next()) != null) {
         res.advance();
         Cell cell = res.current();
@@ -936,7 +936,7 @@ public final class BackupSystemTable implements Closeable {
 
   private HashMap<String, Long> fromTableServerTimestampProto(
       BackupProtos.TableServerTimestamp proto) {
-    HashMap<String, Long> map = new HashMap<String, Long>();
+    HashMap<String, Long> map = new HashMap<>();
     List<BackupProtos.ServerTimestamp> list = proto.getServerTimestampList();
     for (BackupProtos.ServerTimestamp st : list) {
       ServerName sn =
@@ -997,7 +997,6 @@ public final class BackupSystemTable implements Closeable {
    * Deletes incremental backup set for a backup destination
    * @param backupRoot backup root
    */
-
   public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("Delete incremental backup table set to backup system table. ROOT=" + backupRoot);
@@ -1084,7 +1083,6 @@ public final class BackupSystemTable implements Closeable {
         throw new RuntimeException("remove is not supported");
       }
     };
-
   }
 
   /**
@@ -1092,8 +1090,8 @@ public final class BackupSystemTable implements Closeable {
    * @param file name of a file to check
    * @return true, if deletable, false otherwise.
    * @throws IOException exception
-   * TODO: multiple backup destination support
    */
+  // TODO: multiple backup destination support
   public boolean isWALFileDeletable(String file) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("Check if WAL file has been already backed up in backup system table " + file);
@@ -1174,12 +1172,12 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Get backup set list
    * @return backup set list
-   * @throws IOException
+   * @throws IOException if a table or scanner operation fails
    */
   public List<String> listBackupSets() throws IOException {
     LOG.trace("Backup set list");
 
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     Table table = null;
     ResultScanner scanner = null;
     try {
@@ -1187,7 +1185,7 @@ public final class BackupSystemTable implements Closeable {
       Scan scan = createScanForBackupSetList();
       scan.setMaxVersions(1);
       scanner = table.getScanner(scan);
-      Result res = null;
+      Result res;
       while ((res = scanner.next()) != null) {
         res.advance();
         list.add(cellKeyToBackupSetName(res.current()));
@@ -1207,7 +1205,7 @@ public final class BackupSystemTable implements Closeable {
    * Get backup set description (list of tables)
    * @param name set's name
    * @return list of tables in a backup set
-   * @throws IOException
+   * @throws IOException if a table operation fails
    */
   public List<TableName> describeBackupSet(String name) throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -1218,7 +1216,11 @@ public final class BackupSystemTable implements Closeable {
       table = connection.getTable(tableName);
       Get get = createGetForBackupSet(name);
       Result res = table.get(get);
-      if (res.isEmpty()) return null;
+
+      if (res.isEmpty()) {
+        return null;
+      }
+
       res.advance();
       String[] tables = cellValueToBackupSet(res.current());
       return Arrays.asList(tables).stream().map(item -> TableName.valueOf(item)).
@@ -1234,7 +1236,7 @@ public final class BackupSystemTable implements Closeable {
    * Add backup set (list of tables)
    * @param name set name
    * @param newTables list of tables, comma-separated
-   * @throws IOException
+   * @throws IOException if a table operation fails
    */
   public void addToBackupSet(String name, String[] newTables) throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -1260,15 +1262,15 @@ public final class BackupSystemTable implements Closeable {
    * Remove tables from backup set (list of tables)
    * @param name set name
    * @param toRemove list of tables
-   * @throws IOException
+   * @throws IOException if a table operation or deleting the backup set fails
    */
   public void removeFromBackupSet(String name, String[] toRemove) throws IOException {
     if (LOG.isTraceEnabled()) {
       LOG.trace(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ")
           + "]");
     }
-    String[] disjoint = null;
-    String[] tables = null;
+    String[] disjoint;
+    String[] tables;
     try (Table table = connection.getTable(tableName)) {
       Get get = createGetForBackupSet(name);
       Result res = table.get(get);
@@ -1309,7 +1311,7 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Delete backup set
    * @param name set's name
-   * @throws IOException
+   * @throws IOException if getting or deleting the table fails
    */
   public void deleteBackupSet(String name) throws IOException {
     if (LOG.isTraceEnabled()) {
@@ -1326,7 +1328,6 @@ public final class BackupSystemTable implements Closeable {
    * @return table's descriptor
    */
   public static TableDescriptor getSystemTableDescriptor(Configuration conf) {
-
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf));
 
     ColumnFamilyDescriptorBuilder colBuilder =
@@ -1334,8 +1335,7 @@ public final class BackupSystemTable implements Closeable {
 
     colBuilder.setMaxVersions(1);
     Configuration config = HBaseConfiguration.create();
-    int ttl =
-        config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
+    int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
           BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
     colBuilder.setTimeToLive(ttl);
 
@@ -1369,7 +1369,6 @@ public final class BackupSystemTable implements Closeable {
    * @return table's descriptor
    */
   public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
-
     TableDescriptorBuilder builder =
         TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));
 
@@ -1377,8 +1376,7 @@ public final class BackupSystemTable implements Closeable {
         ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
     colBuilder.setMaxVersions(1);
     Configuration config = HBaseConfiguration.create();
-    int ttl =
-        config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
+    int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
           BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
     colBuilder.setTimeToLive(ttl);
     ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
@@ -1391,8 +1389,7 @@ public final class BackupSystemTable implements Closeable {
   }
 
   public static TableName getTableNameForBulkLoadedData(Configuration conf) {
-    String name =
-        conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
+    String name = conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY,
           BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT) + "_bulk";
     return TableName.valueOf(name);
   }
@@ -1426,7 +1423,6 @@ public final class BackupSystemTable implements Closeable {
    * Creates Delete operation for a given backup id
    * @param backupId backup's ID
    * @return delete operation
-   * @throws IOException exception
    */
   private Delete createDeleteForBackupInfo(String backupId) {
     Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId));
@@ -1461,7 +1457,6 @@ public final class BackupSystemTable implements Closeable {
   /**
    * Creates Put operation to store start code to backup system table
    * @return put operation
-   * @throws IOException exception
    */
   private Put createPutForStartCode(String startCode, String rootPath) {
     Put put = new Put(rowkey(START_CODE_ROW, rootPath));
@@ -1635,7 +1630,8 @@ public final class BackupSystemTable implements Closeable {
         put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
         put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
         puts.add(put);
-        LOG.debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
+        LOG.debug("writing done bulk path " + file + " for " + table + " "
+                + Bytes.toString(region));
       }
     }
     return puts;
@@ -1867,7 +1863,7 @@ public final class BackupSystemTable implements Closeable {
     }
   }
 
-  static Scan createScanForOrigBulkLoadedFiles(TableName table) throws IOException {
+  static Scan createScanForOrigBulkLoadedFiles(TableName table) {
     Scan scan = new Scan();
     byte[] startRow = rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM);
     byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
@@ -1901,7 +1897,7 @@ public final class BackupSystemTable implements Closeable {
    * @param backupId the backup Id. It can be null when querying for all tables
    * @return the Scan object
    */
-  static Scan createScanForBulkLoadedFiles(String backupId) throws IOException {
+  static Scan createScanForBulkLoadedFiles(String backupId) {
     Scan scan = new Scan();
     byte[] startRow =
         backupId == null ? BULK_LOAD_PREFIX_BYTES : rowkey(BULK_LOAD_PREFIX, backupId
@@ -1910,7 +1906,6 @@ public final class BackupSystemTable implements Closeable {
     stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
     scan.setStartRow(startRow);
     scan.setStopRow(stopRow);
-    // scan.setTimeRange(lower, Long.MAX_VALUE);
     scan.addFamily(BackupSystemTable.META_FAMILY);
     scan.setMaxVersions(1);
     return scan;
@@ -1930,18 +1925,17 @@ public final class BackupSystemTable implements Closeable {
    * @param files list of WAL file paths
    * @param backupId backup id
    * @return put list
-   * @throws IOException exception
    */
-  private List<Put>
-      createPutsForAddWALFiles(List<String> files, String backupId, String backupRoot)
-          throws IOException {
-    List<Put> puts = new ArrayList<Put>(files.size());
+  private List<Put> createPutsForAddWALFiles(List<String> files, String backupId,
+          String backupRoot) {
+    List<Put> puts = new ArrayList<>(files.size());
     for (String file : files) {
       Put put = new Put(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file)));
       put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("backupId"),
         Bytes.toBytes(backupId));
       put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("file"), Bytes.toBytes(file));
-      put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("root"), Bytes.toBytes(backupRoot));
+      put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("root"),
+              Bytes.toBytes(backupRoot));
       puts.add(put);
     }
     return puts;
@@ -1968,9 +1962,8 @@ public final class BackupSystemTable implements Closeable {
    * Creates Get operation for a given wal file name TODO: support for backup destination
    * @param file file
    * @return get operation
-   * @throws IOException exception
    */
-  private Get createGetForCheckWALFile(String file) throws IOException {
+  private Get createGetForCheckWALFile(String file) {
     Get get = new Get(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file)));
     // add backup root column
     get.addFamily(BackupSystemTable.META_FAMILY);
@@ -2034,9 +2027,8 @@ public final class BackupSystemTable implements Closeable {
    * Converts cell to backup set list.
    * @param current current cell
    * @return backup set as array of table names
-   * @throws IOException
    */
-  private String[] cellValueToBackupSet(Cell current) throws IOException {
+  private String[] cellValueToBackupSet(Cell current) {
     byte[] data = CellUtil.cloneValue(current);
     if (!ArrayUtils.isEmpty(data)) {
       return Bytes.toString(data).split(",");
@@ -2048,9 +2040,8 @@ public final class BackupSystemTable implements Closeable {
    * Converts cell key to backup set name.
    * @param current current cell
    * @return backup set name
-   * @throws IOException
    */
-  private String cellKeyToBackupSetName(Cell current) throws IOException {
+  private String cellKeyToBackupSetName(Cell current) {
     byte[] data = CellUtil.cloneRow(current);
     return Bytes.toString(data).substring(SET_KEY_PREFIX.length());
   }
@@ -2062,5 +2053,4 @@ public final class BackupSystemTable implements Closeable {
     }
     return sb.toString().getBytes();
   }
-
 }

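Two mechanical fixes recur throughout the BackupSystemTable hunks: explicit generic type arguments collapse to the diamond operator, and single-statement if bodies gain braces (presumably Checkstyle's NeedBraces rule). A small self-contained illustration:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondAndBraces {
      public static void main(String[] args) {
        // Before: new HashMap<String, List<String>>(). Since Java 7 the
        // compiler infers the type arguments from the declaration.
        Map<String, List<String>> byFamily = new HashMap<>();
        byFamily.put("meta", new ArrayList<>());

        // Braces are required even for a single-statement body.
        if (byFamily.get("meta").isEmpty()) {
          System.out.println("no entries yet");
        }
      }
    }
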
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index c88c896..c0103f5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Full table backup implementation
@@ -86,7 +86,7 @@ public class FullTableBackupClient extends TableBackupClient {
       // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
       // calculate the real files' size for the percentage in the future.
       // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
-      int res = 0;
+      int res;
       String[] args = new String[4];
       args[0] = "-snapshot";
       args[1] = backupInfo.getSnapshotName(table);
@@ -116,23 +116,24 @@ public class FullTableBackupClient extends TableBackupClient {
   }
 
   /**
-   * Backup request execution
-   * @throws IOException
+   * Backup request execution.
+   *
+   * @throws IOException if the execution of the backup fails
    */
   @Override
   public void execute() throws IOException {
     try (Admin admin = conn.getAdmin()) {
       // Begin BACKUP
       beginBackup(backupManager, backupInfo);
-      String savedStartCode = null;
-      boolean firstBackup = false;
+      String savedStartCode;
+      boolean firstBackup;
       // do snapshot for full table backup
 
       savedStartCode = backupManager.readBackupStartCode();
       firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
       if (firstBackup) {
-        // This is our first backup. Let's put some marker to system table so that we can hold the logs
-        // while we do the backup.
+        // This is our first backup. Let's put a marker in the system table so that we can hold
+        // the logs while we do the backup.
         backupManager.writeBackupStartCode(0L);
       }
       // We roll log here before we do the snapshot. It is possible there is duplicate data
@@ -142,7 +143,7 @@ public class FullTableBackupClient extends TableBackupClient {
       // the snapshot.
       LOG.info("Execute roll log procedure for full backup ...");
 
-      Map<String, String> props = new HashMap<String, String>();
+      Map<String, String> props = new HashMap<>();
       props.put("backupRoot", backupInfo.getBackupRootDir());
       admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
         LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
@@ -198,13 +199,10 @@ public class FullTableBackupClient extends TableBackupClient {
         BackupType.FULL, conf);
       throw new IOException(e);
     }
-
   }
 
-
   protected void snapshotTable(Admin admin, TableName tableName, String snapshotName)
       throws IOException {
-
     int maxAttempts =
         conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
     int pause =

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 5470823..a20f9b5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -63,8 +63,7 @@ public class IncrementalBackupManager extends BackupManager {
    * @return The new HashMap of RS log time stamps after the log roll for this incremental backup.
    * @throws IOException exception
    */
-  public HashMap<String, Long> getIncrBackupLogFileMap()
-      throws IOException {
+  public HashMap<String, Long> getIncrBackupLogFileMap() throws IOException {
     List<String> logList;
     HashMap<String, Long> newTimestamps;
     HashMap<String, Long> previousTimestampMins;
@@ -89,7 +88,7 @@ public class IncrementalBackupManager extends BackupManager {
     }
 
     LOG.info("Execute roll log procedure for incremental backup ...");
-    HashMap<String, String> props = new HashMap<String, String>();
+    HashMap<String, String> props = new HashMap<>();
     props.put("backupRoot", backupInfo.getBackupRootDir());
 
     try (Admin admin = conn.getAdmin()) {
@@ -109,12 +108,12 @@ public class IncrementalBackupManager extends BackupManager {
   }
 
   /**
-   * Get list of WAL files eligible for incremental backup
+   * Get list of WAL files eligible for incremental backup.
+   *
    * @return list of WAL files
-   * @throws IOException
+   * @throws IOException if getting the list of WAL files fails
    */
-  public List<String> getIncrBackupLogFileList()
-      throws IOException {
+  public List<String> getIncrBackupLogFileList() throws IOException {
     List<String> logList;
     HashMap<String, Long> newTimestamps;
     HashMap<String, Long> previousTimestampMins;
@@ -154,14 +153,17 @@ public class IncrementalBackupManager extends BackupManager {
 
   private List<String> excludeAlreadyBackedUpWALs(List<String> logList,
       List<WALItem> logFromSystemTable) {
-
     Set<String> walFileNameSet = convertToSet(logFromSystemTable);
 
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     for (int i=0; i < logList.size(); i++) {
       Path p = new Path(logList.get(i));
       String name  = p.getName();
-      if (walFileNameSet.contains(name)) continue;
+
+      if (walFileNameSet.contains(name)) {
+        continue;
+      }
+
       list.add(logList.get(i));
     }
     return list;
@@ -169,12 +171,11 @@ public class IncrementalBackupManager extends BackupManager {
 
   /**
    * Create Set of WAL file names (not full path names)
-   * @param logFromSystemTable
+   * @param logFromSystemTable the logs from the system table to convert
    * @return set of WAL file names
    */
   private Set<String> convertToSet(List<WALItem> logFromSystemTable) {
-
-    Set<String> set = new HashSet<String>();
+    Set<String> set = new HashSet<>();
     for (int i=0; i < logFromSystemTable.size(); i++) {
       WALItem item = logFromSystemTable.get(i);
       set.add(item.walFile);
@@ -188,11 +189,11 @@ public class IncrementalBackupManager extends BackupManager {
    * @param olderTimestamps timestamp map for each region server of the last backup.
    * @param newestTimestamps timestamp map for each region server that the backup should lead to.
    * @return list of log files which needs to be added to this backup
-   * @throws IOException
+   * @throws IOException if getting the WAL files from the backup system fails
    */
   private List<WALItem> getLogFilesFromBackupSystem(HashMap<String, Long> olderTimestamps,
       HashMap<String, Long> newestTimestamps, String backupRoot) throws IOException {
-    List<WALItem> logFiles = new ArrayList<WALItem>();
+    List<WALItem> logFiles = new ArrayList<>();
     Iterator<WALItem> it = getWALFilesFromBackupSystem();
     while (it.hasNext()) {
       WALItem item = it.next();
@@ -248,8 +249,8 @@ public class IncrementalBackupManager extends BackupManager {
     FileSystem fs = rootdir.getFileSystem(conf);
     NewestLogFilter pathFilter = new NewestLogFilter();
 
-    List<String> resultLogFiles = new ArrayList<String>();
-    List<String> newestLogs = new ArrayList<String>();
+    List<String> resultLogFiles = new ArrayList<>();
+    List<String> newestLogs = new ArrayList<>();
 
     /*
      * The old region servers and timestamps info we kept in backup system table may be out of sync
@@ -259,7 +260,6 @@ public class IncrementalBackupManager extends BackupManager {
      * with. We'll just use all the logs in that directory. We always write up-to-date region server
      * and timestamp info to backup system table at the end of successful backup.
      */
-
     FileStatus[] rss;
     Path p;
     String host;
@@ -381,5 +381,4 @@ public class IncrementalBackupManager extends BackupManager {
       }
     }
   }
-
 }

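The cleaned-up excludeAlreadyBackedUpWALs keeps a WAL path only when its bare file name is absent from the set built out of the backup system table. The same idea in a dependency-free sketch (java.nio stands in for Hadoop's Path; all names here are illustrative):

    import java.nio.file.Paths;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    public class WalFilter {
      static List<String> excludeBackedUp(List<String> walPaths, Set<String> backedUpNames) {
        List<String> result = new ArrayList<>();
        for (String path : walPaths) {
          // Compare by file name only; the system table records bare WAL names.
          String name = Paths.get(path).getFileName().toString();
          if (!backedUpNames.contains(name)) {
            result.add(path);
          }
        }
        return result;
      }
    }
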
http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 34d713d..c897ae2 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -72,7 +72,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     for (String file : incrBackupFileList) {
       Path p = new Path(file);
       if (fs.exists(p) || isActiveWalPath(p)) {
@@ -94,7 +94,10 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected static int getIndex(TableName tbl, List<TableName> sTableList) {
-    if (sTableList == null) return 0;
+    if (sTableList == null) {
+      return 0;
+    }
+
     for (int i = 0; i < sTableList.size(); i++) {
       if (tbl.equals(sTableList.get(i))) {
         return i;
@@ -110,12 +113,13 @@ public class IncrementalTableBackupClient extends TableBackupClient {
    * @return map of table to List of files
    */
   @SuppressWarnings("unchecked")
-  protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList) throws IOException {
+  protected Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList)
+          throws IOException {
     Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
-    List<String> activeFiles = new ArrayList<String>();
-    List<String> archiveFiles = new ArrayList<String>();
+    List<String> activeFiles = new ArrayList<>();
+    List<String> archiveFiles = new ArrayList<>();
     Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
-    backupManager.readBulkloadRows(sTableList);
+            backupManager.readBulkloadRows(sTableList);
     Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst();
     FileSystem tgtFs;
     try {
@@ -136,7 +140,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
         continue;
       }
       if (mapForSrc[srcIdx] == null) {
-        mapForSrc[srcIdx] = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
+        mapForSrc[srcIdx] = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       }
       Path tblDir = FSUtils.getTableDir(rootdir, srcTable);
       Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
@@ -152,7 +156,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
           Path famDir = new Path(regionDir, fam);
           List<Path> files;
           if (!mapForSrc[srcIdx].containsKey(fam.getBytes())) {
-            files = new ArrayList<Path>();
+            files = new ArrayList<>();
             mapForSrc[srcIdx].put(fam.getBytes(), files);
           } else {
             files = mapForSrc[srcIdx].get(fam.getBytes());
@@ -177,13 +181,13 @@ public class IncrementalTableBackupClient extends TableBackupClient {
               if (LOG.isTraceEnabled()) {
                 LOG.trace("found bulk hfile " + file + " in " + famDir + " for " + tblName);
               }
-                if (LOG.isTraceEnabled()) {
-                  LOG.trace("copying " + p + " to " + tgt);
-                }
-                activeFiles.add(p.toString());
+              if (LOG.isTraceEnabled()) {
+                LOG.trace("copying " + p + " to " + tgt);
+              }
+              activeFiles.add(p.toString());
             } else if (fs.exists(archive)){
               LOG.debug("copying archive " + archive + " to " + tgt);
-                archiveFiles.add(archive.toString());
+              archiveFiles.add(archive.toString());
             }
             files.add(tgt);
           }
@@ -198,7 +202,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
 
   private void copyBulkLoadedFiles(List<String> activeFiles, List<String> archiveFiles)
       throws IOException {
-
     try {
       // Enable special mode of BackupDistCp
       conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5);
@@ -220,7 +223,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
           // Update active and archived lists
           // When file is being moved from active to archive
           // directory, the number of active files decreases
-
           int numOfActive = activeFiles.size();
           updateFileLists(activeFiles, archiveFiles);
           if (activeFiles.size() < numOfActive) {
@@ -242,12 +244,11 @@ public class IncrementalTableBackupClient extends TableBackupClient {
       // Disable special mode of BackupDistCp
       conf.unset(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY);
     }
-
   }
 
   private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
       throws IOException {
-    List<String> newlyArchived = new ArrayList<String>();
+    List<String> newlyArchived = new ArrayList<>();
 
     for (String spath : activeFiles) {
       if (!fs.exists(new Path(spath))) {
@@ -261,12 +262,10 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     }
 
     LOG.debug(newlyArchived.size() + " files have been archived.");
-
   }
 
   @Override
   public void execute() throws IOException {
-
     try {
       // case PREPARE_INCREMENTAL:
       beginBackup(backupManager, backupInfo);
@@ -288,7 +287,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
       BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
       // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
       convertWALsToHFiles();
-      incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, backupInfo.getBackupRootDir());
+      incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
+              backupInfo.getBackupRootDir());
       // Save list of WAL files copied
       backupManager.recordWALFiles(backupInfo.getIncrBackupFileList());
     } catch (Exception e) {
@@ -329,9 +329,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected void incrementalCopyHFiles(String[] files, String backupDest) throws IOException {
-
     try {
-      LOG.debug("Incremental copy HFiles is starting. dest="+backupDest);
+      LOG.debug("Incremental copy HFiles is starting. dest=" + backupDest);
       // set overall backup phase: incremental_copy
       backupInfo.setPhase(BackupPhase.INCREMENTAL_COPY);
       // get incremental backup file list and prepare parms for DistCp
@@ -366,7 +365,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     if (!result) {
       LOG.warn("Could not delete " + path);
     }
-
   }
 
   protected void convertWALsToHFiles() throws IOException {
@@ -386,7 +384,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     }
   }
 
-
   protected boolean tableExists(TableName table, Connection conn) throws IOException {
     try (Admin admin = conn.getAdmin()) {
       return admin.tableExists(table);
@@ -394,7 +391,6 @@ public class IncrementalTableBackupClient extends TableBackupClient {
   }
 
   protected void walToHFiles(List<String> dirPaths, TableName tableName) throws IOException {
-
     Tool player = new WALPlayer();
 
     // Player reads all files in arbitrary directory structure and creates
@@ -439,5 +435,4 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     path = new Path(path, backupId);
     return path;
   }
-
 }
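
The getIndex change above illustrates the Checkstyle NeedBraces rule: even a
single-statement guard clause gets an explicit block. A stand-alone sketch of
the same shape, with a hypothetical indexOf standing in for getIndex:

    import java.util.Arrays;
    import java.util.List;

    public class BracesExample {
      static int indexOf(String key, List<String> items) {
        if (items == null) {
          return 0; // braces are now required, even for a one-line guard
        }
        for (int i = 0; i < items.size(); i++) {
          if (key.equals(items.get(i))) {
            return i;
          }
        }
        return -1;
      }

      public static void main(String[] args) {
        System.out.println(indexOf("b", Arrays.asList("a", "b"))); // 1
        System.out.println(indexOf("b", null));                    // 0
      }
    }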

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index c6b6bad..c52d658 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -63,7 +63,7 @@ public class RestoreTablesClient {
   private String targetRootDir;
   private boolean isOverwrite;
 
-  public RestoreTablesClient(Connection conn, RestoreRequest request) throws IOException {
+  public RestoreTablesClient(Connection conn, RestoreRequest request) {
     this.targetRootDir = request.getBackupRootDir();
     this.backupId = request.getBackupId();
     this.sTableArray = request.getFromTables();
@@ -74,13 +74,11 @@ public class RestoreTablesClient {
     this.isOverwrite = request.isOverwrite();
     this.conn = conn;
     this.conf = conn.getConfiguration();
-
   }
 
   /**
-   * Validate target tables
-   * @param conn connection
-   * @param mgr table state manager
+   * Validate target tables.
+   *
    * @param tTableArray: target tables
    * @param isOverwrite overwrite existing table
    * @throws IOException exception
@@ -125,8 +123,8 @@ public class RestoreTablesClient {
   }
 
   /**
-   * Restore operation handle each backupImage in array
-   * @param svc: master services
+   * Restore operation handles each backupImage in the array.
+   *
    * @param images: array BackupImage
    * @param sTable: table to be restored
    * @param tTable: table to be restored to
@@ -136,7 +134,6 @@ public class RestoreTablesClient {
 
   private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable,
       boolean truncateIfExists) throws IOException {
-
     // First image MUST be image of a FULL backup
     BackupImage image = images[0];
     String rootDir = image.getRootDir();
@@ -163,7 +160,7 @@ public class RestoreTablesClient {
       return;
     }
 
-    List<Path> dirList = new ArrayList<Path>();
+    List<Path> dirList = new ArrayList<>();
     // add full backup path
     // full backup path comes first
     for (int i = 1; i < images.length; i++) {
@@ -188,7 +185,7 @@ public class RestoreTablesClient {
   private List<Path> getFilesRecursively(String fileBackupDir)
       throws IllegalArgumentException, IOException {
     FileSystem fs = FileSystem.get((new Path(fileBackupDir)).toUri(), new Configuration());
-    List<Path> list = new ArrayList<Path>();
+    List<Path> list = new ArrayList<>();
     RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true);
     while (it.hasNext()) {
       Path p = it.next().getPath();
@@ -204,13 +201,11 @@ public class RestoreTablesClient {
    * @param backupManifestMap : tableName, Manifest
    * @param sTableArray The array of tables to be restored
    * @param tTableArray The array of mapping tables to restore to
-   * @return set of BackupImages restored
    * @throws IOException exception
    */
   private void restore(HashMap<TableName, BackupManifest> backupManifestMap,
       TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
-    TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
-    boolean truncateIfExists = isOverwrite;
+    TreeSet<BackupImage> restoreImageSet = new TreeSet<>();
     Set<String> backupIdSet = new HashSet<>();
 
     for (int i = 0; i < sTableArray.length; i++) {
@@ -219,20 +214,21 @@ public class RestoreTablesClient {
       BackupManifest manifest = backupManifestMap.get(table);
       // Get the image list of this backup for restore in time order from old
       // to new.
-      List<BackupImage> list = new ArrayList<BackupImage>();
+      List<BackupImage> list = new ArrayList<>();
       list.add(manifest.getBackupImage());
-      TreeSet<BackupImage> set = new TreeSet<BackupImage>(list);
+      TreeSet<BackupImage> set = new TreeSet<>(list);
       List<BackupImage> depList = manifest.getDependentListByTable(table);
       set.addAll(depList);
       BackupImage[] arr = new BackupImage[set.size()];
       set.toArray(arr);
-      restoreImages(arr, table, tTableArray[i], truncateIfExists);
+      restoreImages(arr, table, tTableArray[i], isOverwrite);
       restoreImageSet.addAll(list);
       if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
         LOG.info("Restore includes the following image(s):");
         for (BackupImage image : restoreImageSet) {
           LOG.info("Backup: " + image.getBackupId() + " "
-              + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table));
+              + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
+                  table));
           if (image.getType() == BackupType.INCREMENTAL) {
             backupIdSet.add(image.getBackupId());
             LOG.debug("adding " + image.getBackupId() + " for bulk load");
@@ -251,14 +247,10 @@ public class RestoreTablesClient {
   }
 
   static boolean withinRange(long a, long lower, long upper) {
-    if (a < lower || a > upper) {
-      return false;
-    }
-    return true;
+    return a >= lower && a <= upper;
   }
 
   public void execute() throws IOException {
-
     // case VALIDATION:
     // check the target tables
     checkTargetTables(tTableArray, isOverwrite);
@@ -272,5 +264,4 @@ public class RestoreTablesClient {
 
     restore(backupManifestMap, sTableArray, tTableArray, isOverwrite);
   }
-
 }
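
The withinRange change above is a pure boolean simplification: an if/else
that returns constant booleans collapses into a single expression with the
condition inverted. A stand-alone sketch:

    public class RangeExample {
      // Equivalent to: if (a < lower || a > upper) { return false; } return true;
      static boolean withinRange(long a, long lower, long upper) {
        return a >= lower && a <= upper;
      }

      public static void main(String[] args) {
        System.out.println(withinRange(5, 1, 10));  // true
        System.out.println(withinRange(15, 1, 10)); // false
      }
    }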

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
index ab24cca..7d960b4 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
@@ -35,13 +35,13 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
@@ -80,8 +80,7 @@ public abstract class TableBackupClient {
   }
 
   public void init(final Connection conn, final String backupId, BackupRequest request)
-      throws IOException
-  {
+      throws IOException {
     if (request.getBackupType() == BackupType.FULL) {
       backupManager = new BackupManager(conn, conn.getConfiguration());
     } else {
@@ -137,10 +136,10 @@ public abstract class TableBackupClient {
   /**
    * Delete HBase snapshot for backup.
    * @param backupInfo backup info
-   * @throws Exception exception
+   * @throws IOException exception
    */
-  protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo, Configuration conf)
-      throws IOException {
+  protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo,
+      Configuration conf) throws IOException {
     LOG.debug("Trying to delete snapshot for full backup.");
     for (String snapshotName : backupInfo.getSnapshotNames()) {
       if (snapshotName == null) {
@@ -228,11 +227,10 @@ public abstract class TableBackupClient {
    * Fail the overall backup.
    * @param backupInfo backup info
    * @param e exception
-   * @throws Exception exception
+   * @throws IOException exception
    */
   protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager,
       Exception e, String msg, BackupType type, Configuration conf) throws IOException {
-
     try {
       LOG.error(msg + getMessage(e), e);
       // If this is a cancel exception, then we've already cleaned.
@@ -277,16 +275,13 @@ public abstract class TableBackupClient {
     cleanupTargetDir(backupInfo, conf);
   }
 
-
-
   /**
    * Add manifest for the current backup. The manifest is stored within the table backup directory.
    * @param backupInfo The current backup info
    * @throws IOException exception
-   * @throws BackupException exception
    */
   protected void addManifest(BackupInfo backupInfo, BackupManager backupManager, BackupType type,
-      Configuration conf) throws IOException, BackupException {
+      Configuration conf) throws IOException {
     // set the overall backup phase : store manifest
     backupInfo.setPhase(BackupPhase.STORE_MANIFEST);
 
@@ -303,8 +298,7 @@ public abstract class TableBackupClient {
 
       if (type == BackupType.INCREMENTAL) {
         // We'll store the log timestamps for this table only in its manifest.
-        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
-            new HashMap<TableName, HashMap<String, Long>>();
+        HashMap<TableName, HashMap<String, Long>> tableTimestampMap = new HashMap<>();
         tableTimestampMap.put(table, backupInfo.getIncrTimestampMap().get(table));
         manifest.setIncrTimestampMap(tableTimestampMap);
         ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupInfo);
@@ -371,7 +365,7 @@ public abstract class TableBackupClient {
   /**
    * Complete the overall backup.
    * @param backupInfo backup info
-   * @throws Exception exception
+   * @throws IOException exception
    */
   protected void completeBackup(final Connection conn, BackupInfo backupInfo,
       BackupManager backupManager, BackupType type, Configuration conf) throws IOException {
@@ -412,8 +406,9 @@ public abstract class TableBackupClient {
   }
 
   /**
-   * Backup request execution
-   * @throws IOException
+   * Backup request execution.
+   *
+   * @throws IOException if the execution of the backup fails
    */
   public abstract void execute() throws IOException;
 
@@ -430,7 +425,7 @@ public abstract class TableBackupClient {
     }
   }
 
-  public static enum Stage {
+  public enum Stage {
     stage_0, stage_1, stage_2, stage_3, stage_4
   }
 }
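
The last hunk above drops the redundant static from the Stage enum. A nested
enum is implicitly static, so both declarations define exactly the same type.
A stand-alone sketch with a hypothetical outer class:

    public class EnumExample {
      public enum Stage { // "public static enum Stage" would mean the same
        stage_0, stage_1, stage_2
      }

      public static void main(String[] args) {
        Stage s = Stage.stage_1;
        System.out.println(s + " ordinal=" + s.ordinal()); // stage_1 ordinal=1
      }
    }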

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index b1f17cf..6f2c44c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -38,20 +38,19 @@ import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.Tool;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * MapReduce implementation of {@link BackupMergeJob}
  * Must be initialized with configuration of a backup destination cluster
  *
  */
-
 @InterfaceAudience.Private
 public class MapReduceBackupMergeJob implements BackupMergeJob {
   public static final Logger LOG = LoggerFactory.getLogger(MapReduceBackupMergeJob.class);
@@ -87,7 +86,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
       LOG.debug("Merge backup images " + bids);
     }
 
-    List<Pair<TableName, Path>> processedTableList = new ArrayList<Pair<TableName, Path>>();
+    List<Pair<TableName, Path>> processedTableList = new ArrayList<>();
     boolean finishedTables = false;
     Connection conn = ConnectionFactory.createConnection(getConf());
     BackupSystemTable table = new BackupSystemTable(conn);
@@ -104,17 +103,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
       String mergedBackupId = findMostRecentBackupId(backupIds);
 
       TableName[] tableNames = getTableNamesInBackupImages(backupIds);
-      String backupRoot = null;
 
       BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
-      backupRoot = bInfo.getBackupRootDir();
+      String backupRoot = bInfo.getBackupRootDir();
 
       for (int i = 0; i < tableNames.length; i++) {
-
         LOG.info("Merge backup images for " + tableNames[i]);
 
         // Find input directories for table
-
         Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
         String dirs = StringUtils.join(dirPaths, ",");
         Path bulkOutputPath =
@@ -130,16 +126,14 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
         conf.set(bulkOutputConfKey, bulkOutputPath.toString());
         String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
 
-        int result = 0;
-
         player.setConf(getConf());
-        result = player.run(playerArgs);
+        int result = player.run(playerArgs);
         if (!succeeded(result)) {
           throw new IOException("Can not merge backup images for " + dirs
               + " (check Hadoop/MR and HBase logs). Player return code =" + result);
         }
         // Add to processed table list
-        processedTableList.add(new Pair<TableName, Path>(tableNames[i], bulkOutputPath));
+        processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
         LOG.debug("Merge Job finished:" + result);
       }
       List<TableName> tableList = toTableNameList(processedTableList);
@@ -184,7 +178,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   }
 
   protected List<Path> toPathList(List<Pair<TableName, Path>> processedTableList) {
-    ArrayList<Path> list = new ArrayList<Path>();
+    ArrayList<Path> list = new ArrayList<>();
     for (Pair<TableName, Path> p : processedTableList) {
       list.add(p.getSecond());
     }
@@ -192,7 +186,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   }
 
   protected List<TableName> toTableNameList(List<Pair<TableName, Path>> processedTableList) {
-    ArrayList<TableName> list = new ArrayList<TableName>();
+    ArrayList<TableName> list = new ArrayList<>();
     for (Pair<TableName, Path> p : processedTableList) {
       list.add(p.getFirst());
     }
@@ -201,7 +195,6 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
 
   protected void cleanupBulkLoadDirs(FileSystem fs, List<Path> pathList) throws IOException {
     for (Path path : pathList) {
-
       if (!fs.delete(path, true)) {
         LOG.warn("Can't delete " + path);
       }
@@ -210,18 +203,15 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
 
   protected void updateBackupManifest(String backupRoot, String mergedBackupId,
       List<String> backupsToDelete) throws IllegalArgumentException, IOException {
-
     BackupManifest manifest =
         HBackupFileSystem.getManifest(conf, new Path(backupRoot), mergedBackupId);
     manifest.getBackupImage().removeAncestors(backupsToDelete);
     // save back
     manifest.store(conf);
-
   }
 
   protected void deleteBackupImages(List<String> backupIds, Connection conn, FileSystem fs,
       String backupRoot) throws IOException {
-
     // Delete from backup system table
     try (BackupSystemTable table = new BackupSystemTable(conn)) {
       for (String backupId : backupIds) {
@@ -240,7 +230,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   }
 
   protected List<String> getBackupIdsToDelete(String[] backupIds, String mergedBackupId) {
-    List<String> list = new ArrayList<String>();
+    List<String> list = new ArrayList<>();
     for (String id : backupIds) {
       if (id.equals(mergedBackupId)) {
         continue;
@@ -250,9 +240,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
     return list;
   }
 
-  protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath, TableName tableName,
-      String mergedBackupId) throws IllegalArgumentException, IOException {
-
+  protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath,
+          TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException {
     Path dest =
         new Path(HBackupFileSystem.getTableBackupDataDir(backupRoot, mergedBackupId, tableName));
 
@@ -267,7 +256,6 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
         fs.rename(fst.getPath().getParent(), dest);
       }
     }
-
   }
 
   protected String findMostRecentBackupId(String[] backupIds) {
@@ -282,8 +270,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
   }
 
   protected TableName[] getTableNamesInBackupImages(String[] backupIds) throws IOException {
-
-    Set<TableName> allSet = new HashSet<TableName>();
+    Set<TableName> allSet = new HashSet<>();
 
     try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable table = new BackupSystemTable(conn)) {
@@ -300,8 +287,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
 
   protected Path[] findInputDirectories(FileSystem fs, String backupRoot, TableName tableName,
       String[] backupIds) throws IOException {
-
-    List<Path> dirs = new ArrayList<Path>();
+    List<Path> dirs = new ArrayList<>();
 
     for (String backupId : backupIds) {
       Path fileBackupDirPath =
@@ -317,5 +303,4 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
     Path[] ret = new Path[dirs.size()];
     return dirs.toArray(ret);
   }
-
 }
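
Several hunks above also remove dead initializers such as int result = 0
followed immediately by an overwriting assignment; the variable is instead
declared at first use. A stand-alone sketch with a hypothetical run method
standing in for player.run(playerArgs):

    public class DeclareAtUseExample {
      static int run(String[] args) {
        return args.length; // stand-in for the MapReduce player invocation
      }

      public static void main(String[] args) {
        // Before: int result = 0; result = run(...);
        int result = run(new String[] { "dir1", "table1" });
        System.out.println("Player return code =" + result);
      }
    }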

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
index 47bf3f9..1a3c465 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
@@ -118,7 +118,7 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
       try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(tableName);
           RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-          HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
+        HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
       }
       LOG.debug("success configuring load incremental job");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index e4b63f4..1256289 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -29,12 +29,11 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.RestoreJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+import org.apache.hadoop.util.Tool;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-import org.apache.hadoop.util.Tool;
-
 
 /**
  * MapReduce implementation of {@link RestoreJob}
@@ -59,7 +58,6 @@ public class MapReduceRestoreJob implements RestoreJob {
   @Override
   public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNames,
       boolean fullBackupRestore) throws IOException {
-
     String bulkOutputConfKey;
 
     player = new MapReduceHFileSplitterJob();
@@ -77,7 +75,6 @@ public class MapReduceRestoreJob implements RestoreJob {
     }
 
     for (int i = 0; i < tableNames.length; i++) {
-
       LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]);
 
       Path bulkOutputPath =
@@ -85,14 +82,13 @@ public class MapReduceRestoreJob implements RestoreJob {
             getConf());
       Configuration conf = getConf();
       conf.set(bulkOutputConfKey, bulkOutputPath.toString());
-      String[] playerArgs =
-          {
-              dirs,
-              fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i]
-                  .getNameAsString() };
-
-      int result = 0;
-      int loaderResult = 0;
+      String[] playerArgs = {
+        dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i]
+              .getNameAsString()
+      };
+
+      int result;
+      int loaderResult;
       try {
 
         player.setConf(getConf());
@@ -132,5 +128,4 @@ public class MapReduceRestoreJob implements RestoreJob {
   public void setConf(Configuration conf) {
     this.conf = conf;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index a8ece39..bd13d6e 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -30,14 +30,14 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implementation of a log cleaner that checks if a log is still scheduled for incremental backup
@@ -83,7 +83,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
       return files;
     }
 
-    List<FileStatus> list = new ArrayList<FileStatus>();
+    List<FileStatus> list = new ArrayList<>();
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       // If we do not have recorded backup sessions
       try {
@@ -116,7 +116,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
     } catch (IOException e) {
       LOG.error("Failed to get backup system table table, therefore will keep all files", e);
       // nothing to delete
-      return new ArrayList<FileStatus>();
+      return new ArrayList<>();
     }
   }
 
@@ -143,5 +143,4 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
   public boolean isStopped() {
     return this.stopped;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
index 5c41a3b..486b991 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java
@@ -24,12 +24,11 @@ import java.util.List;
 import java.util.concurrent.ThreadPoolExecutor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -39,12 +38,13 @@ import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-import org.apache.zookeeper.KeeperException;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
+
 /**
  * Master procedure manager for coordinated cluster-wide WAL roll operation, which is run during
  * backup operation, see {@link MasterProcedureManager} and {@link RegionServerProcedureManager}
@@ -57,7 +57,8 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
   public static final String ROLLLOG_PROCEDURE_NAME = "rolllog";
   public static final String BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis";
   public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis";
-  public static final String BACKUP_POOL_THREAD_NUMBER_KEY = "hbase.backup.logroll.pool.thread.number";
+  public static final String BACKUP_POOL_THREAD_NUMBER_KEY =
+          "hbase.backup.logroll.pool.thread.number";
 
   public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500;
   public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000;
@@ -78,7 +79,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
 
   @Override
   public void initialize(MasterServices master, MetricsMaster metricsMaster)
-      throws KeeperException, IOException, UnsupportedOperationException {
+      throws IOException, UnsupportedOperationException {
     this.master = master;
     this.done = false;
 
@@ -118,7 +119,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
     // start the process on the RS
     ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance());
     List<ServerName> serverNames = master.getServerManager().getOnlineServersList();
-    List<String> servers = new ArrayList<String>();
+    List<String> servers = new ArrayList<>();
     for (ServerName sn : serverNames) {
       servers.add(sn.toString());
     }
@@ -161,8 +162,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager {
   }
 
   @Override
-  public boolean isProcedureDone(ProcedureDescription desc) throws IOException {
+  public boolean isProcedureDone(ProcedureDescription desc) {
     return done;
   }
-
 }
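
The initialize hunk above narrows the throws clause: KeeperException is no
longer declared because the body cannot raise it, so callers have one less
checked exception to handle. A stand-alone sketch of the idea, with
hypothetical names:

    import java.io.IOException;

    public class ThrowsExample {
      // Before the cleanup: throws KeeperException, IOException.
      // Only the exception the body can actually raise stays declared:
      static void initialize(boolean fail) throws IOException {
        if (fail) {
          throw new IOException("simulated init failure");
        }
      }

      public static void main(String[] args) {
        try {
          initialize(false);
          System.out.println("initialized");
        } catch (IOException e) { // KeeperException no longer needs a handler
          System.err.println(e.getMessage());
        }
      }
    }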

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
index 4353b46..575be39 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java
@@ -52,7 +52,6 @@ public class LogRollBackupSubprocedure extends Subprocedure {
   public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member,
       ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout,
       LogRollBackupSubprocedurePool taskManager, byte[] data) {
-
     super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener,
         wakeFrequency, timeout);
     LOG.info("Constructing a LogRollBackupSubprocedure.");
@@ -82,7 +81,10 @@ public class LogRollBackupSubprocedure extends Subprocedure {
       List<WAL> wals = rss.getWALs();
       long highest = -1;
       for (WAL wal : wals) {
-        if (wal == null) continue;
+        if (wal == null) {
+          continue;
+        }
+
         if (((AbstractFSWAL<?>) wal).getFilenum() > highest) {
           highest = ((AbstractFSWAL<?>) wal).getFilenum();
         }
@@ -109,7 +111,8 @@ public class LogRollBackupSubprocedure extends Subprocedure {
         String server = host + ":" + port;
         Long sts = serverTimestampMap.get(host);
         if (sts != null && sts > highest) {
-          LOG.warn("Won't update server's last roll log result: current=" + sts + " new=" + highest);
+          LOG.warn("Won't update server's last roll log result: current=" + sts + " new="
+                  + highest);
           return null;
         }
         // write the log number to backup system table.
@@ -131,11 +134,10 @@ public class LogRollBackupSubprocedure extends Subprocedure {
     // wait for everything to complete.
     taskManager.waitForOutstandingTasks();
     monitor.rethrowException();
-
   }
 
   @Override
-  public void acquireBarrier() throws ForeignException {
+  public void acquireBarrier() {
     // do nothing, executing in inside barrier step.
   }
 
@@ -163,5 +165,4 @@ public class LogRollBackupSubprocedure extends Subprocedure {
   public void releaseBarrier() {
     // NO OP
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
index 3363638..0a05157 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java
@@ -32,10 +32,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
 
 /**
  * Handle running each of the individual tasks for completing a backup procedure on a region
@@ -52,7 +52,7 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
   private final ExecutorCompletionService<Void> taskPool;
   private final ThreadPoolExecutor executor;
   private volatile boolean aborted;
-  private final List<Future<Void>> futures = new ArrayList<Future<Void>>();
+  private final List<Future<Void>> futures = new ArrayList<>();
   private final String name;
 
   public LogRollBackupSubprocedurePool(String name, Configuration conf) {
@@ -64,9 +64,9 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable {
     this.name = name;
     executor =
         new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS,
-            new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs(" + name
+            new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs(" + name
                 + ")-backup-pool"));
-    taskPool = new ExecutorCompletionService<Void>(executor);
+    taskPool = new ExecutorCompletionService<>(executor);
   }
 
   /**
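
The pool above submits log-roll tasks through an ExecutorCompletionService,
now constructed with the diamond operator. A stand-alone sketch of the
submit-then-drain pattern the subprocedure pool uses; pool sizes and task
bodies are illustrative only:

    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class CompletionServiceExample {
      public static void main(String[] args) throws Exception {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 4, 60L,
            TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        ExecutorCompletionService<Void> taskPool =
            new ExecutorCompletionService<>(executor);

        int tasks = 3;
        for (int i = 0; i < tasks; i++) {
          final int id = i;
          taskPool.submit(() -> {
            System.out.println("rolled WAL on server-" + id);
            return null;
          });
        }
        // Drain the results, mirroring waitForOutstandingTasks():
        for (int i = 0; i < tasks; i++) {
          taskPool.take().get(); // rethrows task failures
        }
        executor.shutdown();
      }
    }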

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
index 82d9dcf..6c743a8 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.procedure.ProcedureMember;
 import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -52,7 +52,6 @@ import org.slf4j.LoggerFactory;
  */
 @InterfaceAudience.Private
 public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager {
-
   private static final Logger LOG =
       LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class);
 
@@ -120,7 +119,6 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
    * @return Subprocedure to submit to the ProcedureMemeber.
    */
   public Subprocedure buildSubprocedure(byte[] data) {
-
     // don't run a backup if the parent is stop(ping)
     if (rss.isStopping() || rss.isStopped()) {
       throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName()
@@ -138,14 +136,12 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
         new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf);
     return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis,
         taskManager, data);
-
   }
 
   /**
    * Build the actual backup procedure runner that will do all the 'hard' work
    */
   public class BackupSubprocedureBuilder implements SubprocedureFactory {
-
     @Override
     public Subprocedure buildSubprocedure(String name, byte[] data) {
       return LogRollRegionServerProcedureManager.this.buildSubprocedure(data);
@@ -178,5 +174,4 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa
   public String getProcedureSignature() {
     return "backup-proc";
   }
-
 }


[3/3] hbase git commit: HBASE-19765 Fixed Checkstyle errors in hbase-backup

HBASE-19765 Fixed Checkstyle errors in hbase-backup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c2236b77
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c2236b77
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c2236b77

Branch: refs/heads/master
Commit: c2236b77cb04cf7b92576113a7c9b87b244ff2e4
Parents: f1502a3
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sat Jan 27 02:07:52 2018 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Sun Jan 28 13:45:45 2018 +0100

----------------------------------------------------------------------
 .../hbase/backup/BackupClientFactory.java       |   9 +-
 .../hadoop/hbase/backup/BackupCopyJob.java      |   3 +-
 .../hadoop/hbase/backup/BackupHFileCleaner.java |  36 ++--
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  27 ++-
 .../hadoop/hbase/backup/BackupMergeJob.java     |   6 +-
 .../hbase/backup/BackupRestoreConstants.java    | 124 ++++++-----
 .../hbase/backup/BackupRestoreFactory.java      |   3 +-
 .../hadoop/hbase/backup/BackupTableInfo.java    |   5 +-
 .../hadoop/hbase/backup/HBackupFileSystem.java  |   6 +-
 .../apache/hadoop/hbase/backup/LogUtils.java    |   3 +-
 .../hadoop/hbase/backup/RestoreDriver.java      |  22 +-
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   3 +-
 .../hadoop/hbase/backup/RestoreRequest.java     |   4 +-
 .../hbase/backup/impl/BackupAdminImpl.java      | 158 ++++++--------
 .../hbase/backup/impl/BackupCommands.java       | 205 ++++++++++---------
 .../hadoop/hbase/backup/impl/BackupManager.java |  28 +--
 .../hbase/backup/impl/BackupManifest.java       |  44 ++--
 .../hbase/backup/impl/BackupSystemTable.java    | 146 ++++++-------
 .../backup/impl/FullTableBackupClient.java      |  26 ++-
 .../backup/impl/IncrementalBackupManager.java   |  37 ++--
 .../impl/IncrementalTableBackupClient.java      |  47 ++---
 .../hbase/backup/impl/RestoreTablesClient.java  |  37 ++--
 .../hbase/backup/impl/TableBackupClient.java    |  35 ++--
 .../mapreduce/MapReduceBackupMergeJob.java      |  43 ++--
 .../mapreduce/MapReduceHFileSplitterJob.java    |   2 +-
 .../backup/mapreduce/MapReduceRestoreJob.java   |  23 +--
 .../hbase/backup/master/BackupLogCleaner.java   |  11 +-
 .../master/LogRollMasterProcedureManager.java   |  20 +-
 .../regionserver/LogRollBackupSubprocedure.java |  13 +-
 .../LogRollBackupSubprocedurePool.java          |   8 +-
 .../LogRollRegionServerProcedureManager.java    |   7 +-
 .../hadoop/hbase/backup/util/BackupUtils.java   | 108 +++++-----
 .../hadoop/hbase/backup/util/RestoreTool.java   |  49 ++---
 .../hadoop/hbase/backup/TestBackupBase.java     |  54 +++--
 .../hbase/backup/TestBackupBoundaryTests.java   |  19 +-
 .../hadoop/hbase/backup/TestBackupDelete.java   |  12 +-
 .../hbase/backup/TestBackupDeleteRestore.java   |   5 +-
 .../backup/TestBackupDeleteWithFailures.java    |  47 ++---
 .../hadoop/hbase/backup/TestBackupDescribe.java |  10 +-
 .../hbase/backup/TestBackupHFileCleaner.java    |  12 +-
 .../hbase/backup/TestBackupShowHistory.java     |  35 ++--
 .../hbase/backup/TestBackupStatusProgress.java  |   7 +-
 .../hbase/backup/TestBackupSystemTable.java     |  16 +-
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  10 +-
 .../hadoop/hbase/backup/TestFullRestore.java    |  53 +++--
 .../TestIncrementalBackupMergeWithFailures.java |  33 +--
 .../TestIncrementalBackupWithBulkLoad.java      |  20 +-
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  43 ++--
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   9 +-
 .../backup/TestRepairAfterFailedDelete.java     |   5 +-
 .../hbase/backup/TestRestoreBoundaryTests.java  |   9 +-
 .../hbase/backup/TestSystemTableSnapshot.java   |   8 +-
 52 files changed, 766 insertions(+), 939 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
index 68e5c11..4c96229 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
@@ -28,11 +28,12 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
-public class BackupClientFactory {
+public final class BackupClientFactory {
+  private BackupClientFactory() {
+  }
 
-  public static TableBackupClient create (Connection conn, String backupId, BackupRequest request)
-    throws IOException
-  {
+  public static TableBackupClient create(Connection conn, String backupId, BackupRequest request)
+    throws IOException {
     Configuration conf = conn.getConfiguration();
     try {
       String clsName = conf.get(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS);
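
BackupClientFactory is now final with a private constructor, the standard
utility-class shape Checkstyle enforces: no instances, no subclasses, static
factory methods only. A stand-alone sketch with hypothetical names:

    public final class FactoryExample {
      private FactoryExample() {
        // no instances; the class exposes only static factory methods
      }

      public static String create(String backupId) {
        return "client-for-" + backupId; // stand-in for creating a backup client
      }

      public static void main(String[] args) {
        System.out.println(create("backup_1517104072246"));
      }
    }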

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
index 3385f1d..f5e2137 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
@@ -32,7 +32,6 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public interface BackupCopyJob extends Configurable {
-
   /**
    * Copy backup data to destination
    * @param backupInfo context object
@@ -49,7 +48,7 @@ public interface BackupCopyJob extends Configurable {
   /**
    * Cancel copy job
    * @param jobHandler backup copy job handler
-   * @throws IOException
+   * @throws IOException if cancelling the jobs fails
    */
   void cancel(String jobHandler) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
index da4f56e..68e6b43 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
@@ -31,16 +31,15 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
 import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
 
 /**
@@ -61,9 +60,12 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
   private List<TableName> fullyBackedUpTables = null;
 
   private Set<String> getFilenameFromBulkLoad(Map<byte[], List<Path>>[] maps) {
-    Set<String> filenames = new HashSet<String>();
+    Set<String> filenames = new HashSet<>();
     for (Map<byte[], List<Path>> map : maps) {
-      if (map == null) continue;
+      if (map == null) {
+        continue;
+      }
+
       for (List<Path> paths : map.values()) {
         for (Path p : paths) {
           filenames.add(p.getName());
@@ -98,7 +100,10 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
     // obtain the Set of TableName's which have been fully backed up
     // so that we filter BulkLoad to be returned from server
     if (checkForFullyBackedUpTables) {
-      if (connection == null) return files;
+      if (connection == null) {
+        return files;
+      }
+
       try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
         fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
       } catch (IOException ioe) {
@@ -114,17 +119,14 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
       LOG.error("Failed to read hfile references, skipping checking deletable files", ioe);
       return Collections.emptyList();
     }
-    Iterable<FileStatus> deletables = Iterables.filter(files, new Predicate<FileStatus>() {
-      @Override
-      public boolean apply(FileStatus file) {
-        // If the file is recent, be conservative and wait for one more scan of backup:system table
-        if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
-          return false;
-        }
-        String hfile = file.getPath().getName();
-        boolean foundHFileRef = hfileRefs.contains(hfile);
-        return !foundHFileRef;
+    Iterable<FileStatus> deletables = Iterables.filter(files, file -> {
+      // If the file is recent, be conservative and wait for one more scan of backup:system table
+      if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
+        return false;
       }
+      String hfile = file.getPath().getName();
+      boolean foundHFileRef = hfileRefs.contains(hfile);
+      return !foundHFileRef;
     });
     return deletables;
   }
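
The hunk above rewrites an anonymous Predicate as a lambda. The patch keeps
Guava's Iterables.filter from the shaded thirdparty package; the stand-alone
sketch below shows the same conversion with java.util.function.Predicate and
streams so it compiles without HBase on the classpath:

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    public class LambdaFilterExample {
      public static void main(String[] args) {
        List<String> files = Arrays.asList("hfile-1", "hfile-2", "ref-3");

        // Before: new Predicate<String>() { @Override public boolean ... }
        // After: a lambda with the same body.
        Predicate<String> deletable = f -> !f.startsWith("ref-");

        List<String> deletables =
            files.stream().filter(deletable).collect(Collectors.toList());
        System.out.println(deletables); // [hfile-1, hfile-2]
      }
    }

Note also that the import of the shaded Predicate is removed above, since the
lambda leaves the interface name out of the source.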

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
index f6935f8..6c304ca 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
@@ -32,14 +32,14 @@ import java.util.Set;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
-import org.apache.hadoop.hbase.util.Bytes;
-
 
 /**
  * An object to encapsulate the information for each backup session
@@ -48,20 +48,19 @@ import org.apache.hadoop.hbase.util.Bytes;
 public class BackupInfo implements Comparable<BackupInfo> {
   private static final Logger LOG = LoggerFactory.getLogger(BackupInfo.class);
 
-  public static interface Filter {
-
+  public interface Filter {
     /**
      * Filter interface
      * @param info backup info
      * @return true if info passes filter, false otherwise
      */
-    public boolean apply(BackupInfo info);
+    boolean apply(BackupInfo info);
   }
 
   /**
    * Backup session states
    */
-  public static enum BackupState {
+  public enum BackupState {
     RUNNING, COMPLETE, FAILED, ANY
   }
 
@@ -69,7 +68,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * BackupPhase - phases of an ACTIVE backup session (running), when state of a backup session is
    * BackupState.RUNNING
    */
-  public static enum BackupPhase {
+  public enum BackupPhase {
     REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST
   }
 
@@ -155,7 +154,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   private long bandwidth = -1;
 
   public BackupInfo() {
-    backupTableInfoMap = new HashMap<TableName, BackupTableInfo>();
+    backupTableInfoMap = new HashMap<>();
   }
 
   public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) {
@@ -196,7 +195,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   }
 
   public void setTableSetTimestampMap(HashMap<TableName,
-                                      HashMap<String, Long>> tableSetTimestampMap) {
+          HashMap<String, Long>> tableSetTimestampMap) {
     this.tableSetTimestampMap = tableSetTimestampMap;
   }
 
@@ -216,7 +215,6 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * Set progress (0-100%)
    * @param p progress value
    */
-
   public void setProgress(int p) {
     this.progress = p;
   }
@@ -297,7 +295,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   }
 
   public List<String> getSnapshotNames() {
-    List<String> snapshotNames = new ArrayList<String>();
+    List<String> snapshotNames = new ArrayList<>();
     for (BackupTableInfo backupStatus : this.backupTableInfoMap.values()) {
       snapshotNames.add(backupStatus.getSnapshotName());
     }
@@ -309,7 +307,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
   }
 
   public List<TableName> getTableNames() {
-    return new ArrayList<TableName>(backupTableInfoMap.keySet());
+    return new ArrayList<>(backupTableInfoMap.keySet());
   }
 
   public void addTables(TableName[] tables) {
@@ -355,8 +353,8 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * Set the new region server log timestamps after distributed log roll
    * @param newTableSetTimestampMap table timestamp map
    */
-  public void
-      setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap) {
+  public void setIncrTimestampMap(HashMap<TableName,
+          HashMap<String, Long>> newTableSetTimestampMap) {
     this.tableSetTimestampMap = newTableSetTimestampMap;
   }
 
@@ -546,5 +544,4 @@ public class BackupInfo implements Comparable<BackupInfo> {
     Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
     return thisTS.compareTo(otherTS);
   }
-
 }
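
The cleanups above drop modifiers that are implicit in Java: members of an interface are public by default, and a nested interface or enum is always static. Because Filter now reads plainly as a single-abstract-method interface, call sites can supply lambdas, as later hunks in this patch do. A self-contained sketch of the equivalence, using a String stand-in rather than the real BackupInfo:

public class ModifierSketch {
  // Equivalent to: public static interface Filter { public boolean apply(String info); }
  public interface Filter {
    boolean apply(String info);
  }

  // Equivalent to: public static enum State { ... } -- nested enums are implicitly static
  public enum State {
    RUNNING, COMPLETE, FAILED, ANY
  }

  public static void main(String[] args) {
    Filter startsWithBackup = info -> info.startsWith("backup_");
    System.out.println(startsWithBackup.apply("backup_1517142714000")); // true
    System.out.println(State.valueOf("COMPLETE"));                      // COMPLETE
  }
}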

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
index dc2e85b..de91fa1 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java
@@ -30,11 +30,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public interface BackupMergeJob extends Configurable {
-
   /**
-   * Run backup merge operation
+   * Run backup merge operation.
+   *
    * @param backupIds backup image ids
-   * @throws IOException
+   * @throws IOException if the backup merge operation fails
    */
   void run(String[] backupIds) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
index 3ea8c56..eaeef22 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -29,102 +29,96 @@ public interface BackupRestoreConstants {
   /*
    * Backup/Restore constants
    */
-  public final static String BACKUP_SYSTEM_TABLE_NAME_KEY = "hbase.backup.system.table.name";
-  public final static String BACKUP_SYSTEM_TABLE_NAME_DEFAULT = "backup:system";
+  String BACKUP_SYSTEM_TABLE_NAME_KEY = "hbase.backup.system.table.name";
+  String BACKUP_SYSTEM_TABLE_NAME_DEFAULT = "backup:system";
 
-  public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
+  String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
 
-  public final static int BACKUP_SYSTEM_TTL_DEFAULT = HConstants.FOREVER;
-  public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable";
-  public final static boolean BACKUP_ENABLE_DEFAULT = false;
+  int BACKUP_SYSTEM_TTL_DEFAULT = HConstants.FOREVER;
+  String BACKUP_ENABLE_KEY = "hbase.backup.enable";
+  boolean BACKUP_ENABLE_DEFAULT = false;
 
+  String BACKUP_MAX_ATTEMPTS_KEY = "hbase.backup.attempts.max";
+  int DEFAULT_BACKUP_MAX_ATTEMPTS = 10;
 
-  public static final String BACKUP_MAX_ATTEMPTS_KEY = "hbase.backup.attempts.max";
-  public static final int DEFAULT_BACKUP_MAX_ATTEMPTS = 10;
-
-  public static final String BACKUP_ATTEMPTS_PAUSE_MS_KEY = "hbase.backup.attempts.pause.ms";
-  public static final int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000;
+  String BACKUP_ATTEMPTS_PAUSE_MS_KEY = "hbase.backup.attempts.pause.ms";
+  int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000;
 
   /*
    *  Drivers option list
    */
-  public static final String OPTION_OVERWRITE = "o";
-  public static final String OPTION_OVERWRITE_DESC =
-      "Overwrite data if any of the restore target tables exists";
+  String OPTION_OVERWRITE = "o";
+  String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exist";
 
-  public static final String OPTION_CHECK = "c";
-  public static final String OPTION_CHECK_DESC =
+  String OPTION_CHECK = "c";
+  String OPTION_CHECK_DESC =
       "Check restore sequence and dependencies only (does not execute the command)";
 
-  public static final String OPTION_SET = "s";
-  public static final String OPTION_SET_DESC = "Backup set name";
-  public static final String OPTION_SET_RESTORE_DESC =
-      "Backup set to restore, mutually exclusive with -t (table list)";
-  public static final String OPTION_SET_BACKUP_DESC =
-      "Backup set to backup, mutually exclusive with -t (table list)";
-  public static final String OPTION_DEBUG = "d";
-  public static final String OPTION_DEBUG_DESC = "Enable debug loggings";
-
-  public static final String OPTION_TABLE = "t";
-  public static final String OPTION_TABLE_DESC = "Table name. If specified, only backup images,"
+  String OPTION_SET = "s";
+  String OPTION_SET_DESC = "Backup set name";
+  String OPTION_SET_RESTORE_DESC = "Backup set to restore, mutually exclusive with -t (table list)";
+  String OPTION_SET_BACKUP_DESC = "Backup set to backup, mutually exclusive with -t (table list)";
+  String OPTION_DEBUG = "d";
+  String OPTION_DEBUG_DESC = "Enable debug logging";
+
+  String OPTION_TABLE = "t";
+  String OPTION_TABLE_DESC = "Table name. If specified, only backup images"
       + " which contain this table will be listed.";
 
-  public static final String OPTION_TABLE_LIST = "l";
-  public static final String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
+  String OPTION_TABLE_LIST = "l";
+  String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated.";
 
-  public static final String OPTION_BANDWIDTH = "b";
-  public static final String OPTION_BANDWIDTH_DESC = "Bandwidth per task (MapReduce task) in MB/s";
+  String OPTION_BANDWIDTH = "b";
+  String OPTION_BANDWIDTH_DESC = "Bandwidth per task (MapReduce task) in MB/s";
 
-  public static final String OPTION_WORKERS = "w";
-  public static final String OPTION_WORKERS_DESC = "Number of parallel MapReduce tasks to execute";
+  String OPTION_WORKERS = "w";
+  String OPTION_WORKERS_DESC = "Number of parallel MapReduce tasks to execute";
 
-  public static final String OPTION_RECORD_NUMBER = "n";
-  public static final String OPTION_RECORD_NUMBER_DESC =
-      "Number of records of backup history. Default: 10";
+  String OPTION_RECORD_NUMBER = "n";
+  String OPTION_RECORD_NUMBER_DESC = "Number of records of backup history. Default: 10";
 
-  public static final String OPTION_PATH = "p";
-  public static final String OPTION_PATH_DESC = "Backup destination root directory path";
+  String OPTION_PATH = "p";
+  String OPTION_PATH_DESC = "Backup destination root directory path";
 
-  public static final String OPTION_TABLE_MAPPING = "m";
-  public static final String OPTION_TABLE_MAPPING_DESC =
+  String OPTION_TABLE_MAPPING = "m";
+  String OPTION_TABLE_MAPPING_DESC =
       "A comma separated list of target tables. "
           + "If specified, each table in <tables> must have a mapping";
-  public static final String OPTION_YARN_QUEUE_NAME = "q";
-  public static final String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
-  public static final String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on";
-
-  public final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
-
-  public static final String BACKUP_CONFIG_STRING =  BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n"
-      + "hbase.master.logcleaner.plugins="
-      +"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
-      + "hbase.procedure.master.classes=YOUR_CLASSES,"
-      +"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
-      + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
-      + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.BackupObserver\n"
-      + "and restart the cluster\n";
-  public static final String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "+
+  String OPTION_YARN_QUEUE_NAME = "q";
+  String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on";
+  String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on";
+
+  String JOB_NAME_CONF_KEY = "mapreduce.job.name";
+
+  String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY
+          + "=true\n"
+          + "hbase.master.logcleaner.plugins="
+          +"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+          + "hbase.procedure.master.classes=YOUR_CLASSES,"
+          +"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+          + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+          + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+          + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
+          + "org.apache.hadoop.hbase.backup.BackupObserver\n"
+          + "and restart the cluster\n";
+  String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " +
       "in hbase-site.xml, set:\n "
       + BACKUP_CONFIG_STRING;
 
-  public static final String VERIFY_BACKUP = "Please make sure that backup is enabled on the cluster. To enable backup, "+
-      "in hbase-site.xml, set:\n "
-      + BACKUP_CONFIG_STRING;
+  String VERIFY_BACKUP = "Please make sure that backup is enabled on the cluster. To enable "
+          + "backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING;
 
   /*
    *  Delimiter in table name list in restore command
    */
-  public static final String TABLENAME_DELIMITER_IN_COMMAND = ",";
+  String TABLENAME_DELIMITER_IN_COMMAND = ",";
 
-  public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root";
+  String CONF_STAGING_ROOT = "snapshot.export.staging.root";
 
-  public static final String BACKUPID_PREFIX = "backup_";
+  String BACKUPID_PREFIX = "backup_";
 
-  public static enum BackupCommand {
+  enum BackupCommand {
     CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS,
     SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST, REPAIR
   }
-
 }
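
In the constants cleanup above, dropping public static final changes nothing semantically: every field of a Java interface carries those modifiers implicitly. A minimal sketch mirroring two of the keys above, detached from the real hbase-site.xml wiring:

public class InterfaceConstantSketch {
  interface Constants {
    // Implicitly public static final, exactly as if spelled out
    String BACKUP_ENABLE_KEY = "hbase.backup.enable";
    boolean BACKUP_ENABLE_DEFAULT = false;
  }

  public static void main(String[] args) {
    // Constants are read the same way with or without the explicit modifiers
    System.out.println(Constants.BACKUP_ENABLE_KEY + " defaults to "
        + Constants.BACKUP_ENABLE_DEFAULT);
  }
}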

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
index 9612ebe..b1bc532 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
@@ -21,8 +21,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreJob;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Factory implementation for backup/restore related jobs
@@ -30,7 +30,6 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 @InterfaceAudience.Private
 public final class BackupRestoreFactory {
-
   public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
   public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
   public final static String HBASE_BACKUP_MERGE_IMPL_CLASS = "hbase.backup.merge.class";

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
index 5f221a5..50abcc8 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 
 @InterfaceAudience.Private
 public class BackupTableInfo  {
-
   /*
    *  Table name for backup
    */
@@ -39,11 +38,9 @@ public class BackupTableInfo  {
   /*
    *  Snapshot name for offline/online snapshot
    */
-
   private String snapshotName = null;
 
   public BackupTableInfo() {
-
   }
 
   public BackupTableInfo(TableName table, String targetRootDir, String backupId) {
@@ -76,7 +73,7 @@ public class BackupTableInfo  {
     if (snapshotName != null) {
       builder.setSnapshotName(snapshotName);
     }
-    builder.setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table));
+    builder.setTableName(ProtobufUtil.toProtoTableName(table));
     return builder.build();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
index 68dba54..ff65a64 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory;
  * the on-disk Backup Image data.
  */
 @InterfaceAudience.Private
-public class HBackupFileSystem {
+public final class HBackupFileSystem {
   public static final Logger LOG = LoggerFactory.getLogger(HBackupFileSystem.class);
 
   /**
@@ -106,10 +106,8 @@ public class HBackupFileSystem {
   // Move manifest file to other place
   private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
       throws IOException {
-    Path manifestPath = null;
-
     FileSystem fs = backupRootPath.getFileSystem(conf);
-    manifestPath =
+    Path manifestPath =
         new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
             + BackupManifest.MANIFEST_FILE_NAME);
     if (!fs.exists(manifestPath)) {
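
The hunk above (and several later ones in this patch) replaces a variable seeded with null and reassigned immediately by a single declaration-with-assignment; the compiler's definite-assignment analysis makes the null seed redundant. A tiny sketch with a hypothetical path value:

public class DeclareAtAssignmentSketch {
  public static void main(String[] args) {
    // Before: redundant null seed
    String pathOld = null;
    pathOld = "/backup/backup_1/.backup.manifest";

    // After: declare where the value is first known; definite-assignment
    // checking still guarantees initialization before use
    String pathNew = "/backup/backup_1/.backup.manifest";

    System.out.println(pathOld.equals(pathNew)); // true
  }
}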

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
index 5ce975f..b96e3c7 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hbase.backup;
 
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Utility class for disabling Zk and client logging
@@ -28,7 +28,6 @@ import org.apache.log4j.Logger;
  */
 @InterfaceAudience.Private
 final class LogUtils {
-
   private LogUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
index 2903bb0..b6c324d 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
@@ -47,9 +47,6 @@ import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
@@ -57,6 +54,9 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -65,7 +65,6 @@ import org.apache.log4j.LogManager;
  */
 @InterfaceAudience.Private
 public class RestoreDriver extends AbstractHBaseTool {
-
   private static final Logger LOG = LoggerFactory.getLogger(RestoreDriver.class);
   private CommandLine cmd;
 
@@ -81,7 +80,7 @@ public class RestoreDriver extends AbstractHBaseTool {
     init();
   }
 
-  protected void init() throws IOException {
+  protected void init() {
     // disable irrelevant loggers to avoid them messing up the command output
     LogUtils.disableZkAndClientLoggers();
   }
@@ -142,7 +141,7 @@ public class RestoreDriver extends AbstractHBaseTool {
 
     String backupRootDir = remainArgs[0];
     String backupId = remainArgs[1];
-    String tables = null;
+    String tables;
     String tableMapping =
         cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
     try (final Connection conn = ConnectionFactory.createConnection(conf);
@@ -190,7 +189,11 @@ public class RestoreDriver extends AbstractHBaseTool {
       throws IOException {
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<TableName> tables = table.describeBackupSet(name);
-      if (tables == null) return null;
+
+      if (tables == null) {
+        return null;
+      }
+
       return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
     }
   }
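
The pattern above recurs throughout this patch: single-line returns such as "if (tables == null) return null;" are expanded into braced blocks, presumably to satisfy a NeedBraces-style checkstyle rule. A before/after sketch of the guard-clause style, using a hypothetical join method:

import java.util.Arrays;
import java.util.List;

public class GuardClauseSketch {
  // Before: compact but brace-less, which brace-requiring checkstyle rules reject
  static String joinOld(List<String> tables) {
    if (tables == null) return null;
    return String.join(",", tables);
  }

  // After: identical behavior, with the guard clause braced
  static String joinNew(List<String> tables) {
    if (tables == null) {
      return null;
    }

    return String.join(",", tables);
  }

  public static void main(String[] args) {
    System.out.println(joinNew(Arrays.asList("t1", "t2"))); // t1,t2
    System.out.println(joinNew(null));                      // null
  }
}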
@@ -205,7 +208,6 @@ public class RestoreDriver extends AbstractHBaseTool {
     addOptWithArg(OPTION_TABLE, OPTION_TABLE_LIST_DESC);
     addOptWithArg(OPTION_TABLE_MAPPING, OPTION_TABLE_MAPPING_DESC);
     addOptWithArg(OPTION_YARN_QUEUE_NAME, OPTION_YARN_QUEUE_NAME_RESTORE_DESC);
-
   }
 
   @Override
@@ -228,7 +230,7 @@ public class RestoreDriver extends AbstractHBaseTool {
   }
 
   @Override
-  public int run(String[] args) throws IOException {
+  public int run(String[] args) {
     if (conf == null) {
       LOG.error("Tool configuration is not initialized");
       throw new NullPointerException("conf");
@@ -262,7 +264,7 @@ public class RestoreDriver extends AbstractHBaseTool {
     return ret;
   }
 
-  protected void printToolUsage() throws IOException {
+  protected void printToolUsage() {
     System.out.println(USAGE_STRING);
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.setLeftPadding(2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
index ca57e59..29b1288 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
@@ -32,14 +32,13 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public interface RestoreJob extends Configurable {
-
   /**
    * Run restore operation
   * @param dirPaths path array of WAL directories
   * @param fromTables tables to restore from
   * @param toTables tables to restore to
    * @param fullBackupRestore full backup restore
-   * @throws IOException
+   * @throws IOException if running the job fails
    */
   void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
       boolean fullBackupRestore) throws IOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
index 5a0a7d4..a654cce 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
@@ -24,8 +24,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * POJO class for restore request
  */
 @InterfaceAudience.Private
-public class RestoreRequest {
-
+public final class RestoreRequest {
   public static class Builder {
     RestoreRequest request;
 
@@ -63,7 +62,6 @@ public class RestoreRequest {
       return this;
     }
 
-
     public RestoreRequest build() {
       return request;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index f27490c..776cc1b 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -44,13 +44,14 @@ import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.util.BackupSet;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 @InterfaceAudience.Private
 public class BackupAdminImpl implements BackupAdmin {
@@ -66,12 +67,12 @@ public class BackupAdminImpl implements BackupAdmin {
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() {
   }
 
   @Override
   public BackupInfo getBackupInfo(String backupId) throws IOException {
-    BackupInfo backupInfo = null;
+    BackupInfo backupInfo;
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       if (backupId == null) {
         ArrayList<BackupInfo> recentSessions = table.getBackupInfos(BackupState.RUNNING);
@@ -93,12 +94,11 @@ public class BackupAdminImpl implements BackupAdmin {
   public int deleteBackups(String[] backupIds) throws IOException {
 
     int totalDeleted = 0;
-    Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>();
+    Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();
 
-    boolean deleteSessionStarted = false;
-    boolean snapshotDone = false;
+    boolean deleteSessionStarted;
+    boolean snapshotDone;
     try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
-
       // Step 1: Make sure there is no active session
       // is running by using startBackupSession API
       // If there is an active session in progress, exception will be thrown
@@ -107,8 +107,8 @@ public class BackupAdminImpl implements BackupAdmin {
         deleteSessionStarted = true;
       } catch (IOException e) {
         LOG.warn("You can not run delete command while active backup session is in progress. \n"
-            + "If there is no active backup session running, run backup repair utility to restore \n"
-            + "backup system integrity.");
+            + "If there is no active backup session running, run backup repair utility to "
+            + "restore \nbackup system integrity.");
         return -1;
       }
 
@@ -136,7 +136,7 @@ public class BackupAdminImpl implements BackupAdmin {
             String rootDir = info.getBackupRootDir();
             HashSet<TableName> allTables = allTablesMap.get(rootDir);
             if (allTables == null) {
-              allTables = new HashSet<TableName>();
+              allTables = new HashSet<>();
               allTablesMap.put(rootDir, allTables);
             }
             allTables.addAll(info.getTableNames());
@@ -176,11 +176,10 @@ public class BackupAdminImpl implements BackupAdmin {
 
   /**
    * Updates incremental backup set for every backupRoot
-   * @param tablesMap map [backupRoot: Set<TableName>]
+   * @param tablesMap map [backupRoot: {@code Set<TableName>}]
    * @param table backup system table
-   * @throws IOException
+   * @throws IOException if a table operation fails
    */
-
   private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
       throws IOException {
     for (String backupRoot : tablesMap.keySet()) {
@@ -213,15 +212,15 @@ public class BackupAdminImpl implements BackupAdmin {
    * which includes T<br>
    * if(INCREMENTAL, YES) deletes only physical data (PD) if(INCREMENTAL, NO) deletes physical data
    * and for table T scans all backup images between last<br>
-   * FULL backup, which is older than the backup being deleted and the next FULL backup (if exists) <br>
+   * FULL backup, which is older than the backup being deleted and the next FULL backup (if exists)
+   * <br>
    * or last one for a particular table T and removes T from list of backup tables.
    * @param backupId backup id
    * @param sysTable backup system table
    * @return total number of deleted backup images
-   * @throws IOException
+   * @throws IOException if deleting the backup fails
    */
   private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
-
     BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
 
     int totalDeleted = 0;
@@ -271,7 +270,7 @@ public class BackupAdminImpl implements BackupAdmin {
         LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted");
       }
       if (success) {
-        sysTable.deleteBulkLoadedRows(new ArrayList<byte[]>(map.keySet()));
+        sysTable.deleteBulkLoadedRows(new ArrayList<>(map.keySet()));
       }
 
       sysTable.deleteBackupInfo(backupInfo.getBackupId());
@@ -283,8 +282,7 @@ public class BackupAdminImpl implements BackupAdmin {
     return totalDeleted;
   }
 
-  private void
-      removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
+  private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
           throws IOException {
     List<TableName> tables = info.getTableNames();
     LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables="
@@ -311,7 +309,7 @@ public class BackupAdminImpl implements BackupAdmin {
       BackupSystemTable table) throws IOException {
     LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
     long ts = backupInfo.getStartTs();
-    List<BackupInfo> list = new ArrayList<BackupInfo>();
+    List<BackupInfo> list = new ArrayList<>();
     List<BackupInfo> history = table.getBackupHistory(backupInfo.getBackupRootDir());
     // Scan from most recent to backupInfo
     // break when backupInfo reached
@@ -337,7 +335,7 @@ public class BackupAdminImpl implements BackupAdmin {
 
   /**
    * Clean up the data at target directory
-   * @throws IOException
+   * @throws IOException if cleaning up the backup directory fails
    */
   private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
       throws IOException {
@@ -359,7 +357,6 @@ public class BackupAdminImpl implements BackupAdmin {
       } else {
         LOG.info("No data has been found in " + targetDirPath.toString() + ".");
       }
-
     } catch (IOException e1) {
       LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
           + "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
@@ -375,11 +372,7 @@ public class BackupAdminImpl implements BackupAdmin {
       if (!tables.contains(tn)) {
         continue;
       }
-      if (info.getStartTs() <= startTime) {
-        return true;
-      } else {
-        return false;
-      }
+      return info.getStartTs() <= startTime;
     }
     return false;
   }
@@ -388,8 +381,12 @@ public class BackupAdminImpl implements BackupAdmin {
   public List<BackupInfo> getHistory(int n) throws IOException {
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<BackupInfo> history = table.getBackupHistory();
-      if (history.size() <= n) return history;
-      List<BackupInfo> list = new ArrayList<BackupInfo>();
+
+      if (history.size() <= n) {
+        return history;
+      }
+
+      List<BackupInfo> list = new ArrayList<>();
       for (int i = 0; i < n; i++) {
         list.add(history.get(i));
       }
@@ -399,12 +396,18 @@ public class BackupAdminImpl implements BackupAdmin {
 
   @Override
   public List<BackupInfo> getHistory(int n, BackupInfo.Filter... filters) throws IOException {
-    if (filters.length == 0) return getHistory(n);
+    if (filters.length == 0) {
+      return getHistory(n);
+    }
+
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<BackupInfo> history = table.getBackupHistory();
-      List<BackupInfo> result = new ArrayList<BackupInfo>();
+      List<BackupInfo> result = new ArrayList<>();
       for (BackupInfo bi : history) {
-        if (result.size() == n) break;
+        if (result.size() == n) {
+          break;
+        }
+
         boolean passed = true;
         for (int i = 0; i < filters.length; i++) {
           if (!filters[i].apply(bi)) {
@@ -424,7 +427,7 @@ public class BackupAdminImpl implements BackupAdmin {
   public List<BackupSet> listBackupSets() throws IOException {
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<String> list = table.listBackupSets();
-      List<BackupSet> bslist = new ArrayList<BackupSet>();
+      List<BackupSet> bslist = new ArrayList<>();
       for (String s : list) {
         List<TableName> tables = table.describeBackupSet(s);
         if (tables != null) {
@@ -439,7 +442,11 @@ public class BackupAdminImpl implements BackupAdmin {
   public BackupSet getBackupSet(String name) throws IOException {
     try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       List<TableName> list = table.describeBackupSet(name);
-      if (list == null) return null;
+
+      if (list == null) {
+        return null;
+      }
+
       return new BackupSet(name, list);
     }
   }
@@ -502,7 +509,6 @@ public class BackupAdminImpl implements BackupAdmin {
         conn.getConfiguration(), rootPath, backupId);
 
       // Check and validate the backup image and its dependencies
-
       if (BackupUtils.validate(backupManifestMap, conn.getConfiguration())) {
         LOG.info(CHECK_OK);
       } else {
@@ -522,14 +528,13 @@ public class BackupAdminImpl implements BackupAdmin {
 
     String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
     if (type == BackupType.INCREMENTAL) {
-      Set<TableName> incrTableSet = null;
+      Set<TableName> incrTableSet;
       try (BackupSystemTable table = new BackupSystemTable(conn)) {
         incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
       }
 
       if (incrTableSet.isEmpty()) {
-        String msg =
-            "Incremental backup table set contains no tables. "
+        String msg = "Incremental backup table set contains no tables. "
                 + "You need to run full backup first "
                 + (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
 
@@ -539,8 +544,7 @@ public class BackupAdminImpl implements BackupAdmin {
         tableList.removeAll(incrTableSet);
         if (!tableList.isEmpty()) {
           String extraTables = StringUtils.join(tableList, ",");
-          String msg =
-              "Some tables (" + extraTables + ") haven't gone through full backup. "
+          String msg = "Some tables (" + extraTables + ") haven't gone through full backup. "
                   + "Perform full backup on " + extraTables + " first, " + "then retry the command";
           throw new IOException(msg);
         }
@@ -585,13 +589,12 @@ public class BackupAdminImpl implements BackupAdmin {
 
     // update table list
     BackupRequest.Builder builder = new BackupRequest.Builder();
-    request =
-        builder.withBackupType(request.getBackupType()).withTableList(tableList)
+    request = builder.withBackupType(request.getBackupType()).withTableList(tableList)
             .withTargetRootDir(request.getTargetRootDir())
             .withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks())
             .withBandwidthPerTasks((int) request.getBandwidth()).build();
 
-    TableBackupClient client = null;
+    TableBackupClient client;
     try {
       client = BackupClientFactory.create(conn, backupId, request);
     } catch (IOException e) {
@@ -606,7 +609,6 @@ public class BackupAdminImpl implements BackupAdmin {
 
   private List<TableName> excludeNonExistingTables(List<TableName> tableList,
       List<TableName> nonExistingTableList) {
-
     for (TableName table : nonExistingTableList) {
       tableList.remove(table);
     }
@@ -634,13 +636,14 @@ public class BackupAdminImpl implements BackupAdmin {
    * <p>
    * @param backupIds list of backup ids
    * @param table backup system table
-   * @throws IOException
+   * @throws IOException if the backup image is not valid for merge
    */
-  private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) throws IOException {
+  private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table)
+          throws IOException {
     String backupRoot = null;
 
-    final Set<TableName> allTables = new HashSet<TableName>();
-    final Set<String> allBackups = new HashSet<String>();
+    final Set<TableName> allTables = new HashSet<>();
+    final Set<String> allBackups = new HashSet<>();
     long minTime = Long.MAX_VALUE, maxTime = Long.MIN_VALUE;
     for (String backupId : backupIds) {
       BackupInfo bInfo = table.readBackupInfo(backupId);
@@ -651,8 +654,8 @@ public class BackupAdminImpl implements BackupAdmin {
       if (backupRoot == null) {
         backupRoot = bInfo.getBackupRootDir();
       } else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
-        throw new IOException("Found different backup destinations in a list of a backup sessions \n"
-            + "1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
+        throw new IOException("Found different backup destinations in a list of a backup sessions "
+                + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
       }
       if (bInfo.getType() == BackupType.FULL) {
         throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
@@ -673,7 +676,6 @@ public class BackupAdminImpl implements BackupAdmin {
       }
     }
 
-
     final long startRangeTime  = minTime;
     final long endRangeTime = maxTime;
     final String backupDest = backupRoot;
@@ -681,54 +683,26 @@ public class BackupAdminImpl implements BackupAdmin {
     // Filter 1 : backupRoot
     // Filter 2 : time range filter
     // Filter 3 : table filter
+    BackupInfo.Filter destinationFilter = info -> info.getBackupRootDir().equals(backupDest);
 
-    BackupInfo.Filter destinationFilter = new  BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        return info.getBackupRootDir().equals(backupDest);
-      }
+    BackupInfo.Filter timeRangeFilter = info -> {
+      long time = info.getStartTs();
+      return time >= startRangeTime && time <= endRangeTime;
     };
 
-    BackupInfo.Filter timeRangeFilter = new  BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        long time = info.getStartTs();
-        return time >= startRangeTime && time <= endRangeTime ;
-      }
-    };
-
-    BackupInfo.Filter tableFilter = new  BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        List<TableName> tables = info.getTableNames();
-        return !Collections.disjoint(allTables, tables);
-      }
-    };
-
-    BackupInfo.Filter typeFilter = new  BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        return info.getType() == BackupType.INCREMENTAL;
-      }
+    BackupInfo.Filter tableFilter = info -> {
+      List<TableName> tables = info.getTableNames();
+      return !Collections.disjoint(allTables, tables);
     };
 
-    BackupInfo.Filter stateFilter = new  BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo info) {
-        return info.getState() == BackupState.COMPLETE;
-      }
-    };
+    BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
+    BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
 
-    List<BackupInfo> allInfos =
-        table.getBackupHistory( -1, destinationFilter,
+    List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter,
           timeRangeFilter, tableFilter, typeFilter, stateFilter);
     if (allInfos.size() != allBackups.size()) {
      // Yes, we have at least one hole in the backup image sequence
-      List<String> missingIds = new ArrayList<String>();
+      List<String> missingIds = new ArrayList<>();
      for (BackupInfo info : allInfos) {
        if (allBackups.contains(info.getBackupId())) {
           continue;
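
The five anonymous Filter classes above collapse into one-line lambdas, and the varargs call hands them to getBackupHistory, which (as the earlier getHistory hunk in BackupAdminImpl shows) accepts a history entry only if every filter passes. A sketch of that AND-composition with stand-in types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FilterCompositionSketch {
  interface Filter {
    boolean apply(long startTs);
  }

  // Keeps only the timestamps accepted by every filter, mirroring the
  // loop in BackupAdminImpl.getHistory(int, Filter...)
  static List<Long> filterHistory(List<Long> history, Filter... filters) {
    List<Long> result = new ArrayList<>();
    for (long ts : history) {
      boolean passed = true;
      for (Filter f : filters) {
        if (!f.apply(ts)) {
          passed = false;
          break;
        }
      }
      if (passed) {
        result.add(ts);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Filter afterMin = ts -> ts >= 100;
    Filter beforeMax = ts -> ts <= 300;
    System.out.println(filterHistory(Arrays.asList(50L, 150L, 250L, 350L), afterMin, beforeMax));
    // prints [150, 250]
  }
}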

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index bbedcee..c310178 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.backup.impl;
 
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
@@ -34,8 +36,6 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKE
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
 
 import java.io.IOException;
 import java.net.URI;
@@ -62,17 +62,16 @@ import org.apache.hadoop.hbase.backup.util.BackupSet;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
 /**
  * General backup commands, options and usage messages
  */
-
 @InterfaceAudience.Private
 public final class BackupCommands {
-
   public final static String INCORRECT_USAGE = "Incorrect usage";
 
   public final static String TOP_LEVEL_NOT_ALLOWED =
@@ -173,7 +172,6 @@ public final class BackupCommands {
             System.err.println("Backup system recovery is required.");
             throw new IOException("Failed backup MERGE found, aborted command execution");
           }
-
         }
       }
     }
@@ -209,47 +207,49 @@ public final class BackupCommands {
   }
 
   public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) {
-    Command cmd = null;
+    Command cmd;
     switch (type) {
-    case CREATE:
-      cmd = new CreateCommand(conf, cmdline);
-      break;
-    case DESCRIBE:
-      cmd = new DescribeCommand(conf, cmdline);
-      break;
-    case PROGRESS:
-      cmd = new ProgressCommand(conf, cmdline);
-      break;
-    case DELETE:
-      cmd = new DeleteCommand(conf, cmdline);
-      break;
-    case HISTORY:
-      cmd = new HistoryCommand(conf, cmdline);
-      break;
-    case SET:
-      cmd = new BackupSetCommand(conf, cmdline);
-      break;
-    case REPAIR:
-      cmd = new RepairCommand(conf, cmdline);
-      break;
-    case MERGE:
-      cmd = new MergeCommand(conf, cmdline);
-      break;
-    case HELP:
-    default:
-      cmd = new HelpCommand(conf, cmdline);
-      break;
+      case CREATE:
+        cmd = new CreateCommand(conf, cmdline);
+        break;
+      case DESCRIBE:
+        cmd = new DescribeCommand(conf, cmdline);
+        break;
+      case PROGRESS:
+        cmd = new ProgressCommand(conf, cmdline);
+        break;
+      case DELETE:
+        cmd = new DeleteCommand(conf, cmdline);
+        break;
+      case HISTORY:
+        cmd = new HistoryCommand(conf, cmdline);
+        break;
+      case SET:
+        cmd = new BackupSetCommand(conf, cmdline);
+        break;
+      case REPAIR:
+        cmd = new RepairCommand(conf, cmdline);
+        break;
+      case MERGE:
+        cmd = new MergeCommand(conf, cmdline);
+        break;
+      case HELP:
+      default:
+        cmd = new HelpCommand(conf, cmdline);
+        break;
     }
     return cmd;
   }
 
   static int numOfArgs(String[] args) {
-    if (args == null) return 0;
+    if (args == null) {
+      return 0;
+    }
+
     return args.length;
   }
 
   public static class CreateCommand extends Command {
-
     CreateCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -293,7 +293,7 @@ public final class BackupCommands {
       if (isRootFolder(targetBackupDir)) {
         throw new IOException(TOP_LEVEL_NOT_ALLOWED);
       }
-      String tables = null;
+      String tables;
 
       // Check if we have both: backup set and list of tables
       if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) {
@@ -359,7 +359,11 @@ public final class BackupCommands {
         Path p = new Path(path);
         Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
         URI uri = p.toUri();
-        if (uri.getScheme() == null) return false;
+
+        if (uri.getScheme() == null) {
+          return false;
+        }
+
         FileSystem.get(uri, conf);
         return true;
       } catch (Exception e) {
@@ -370,7 +374,11 @@ public final class BackupCommands {
     private String getTablesForSet(String name, Configuration conf) throws IOException {
       try (final BackupSystemTable table = new BackupSystemTable(conn)) {
         List<TableName> tables = table.describeBackupSet(name);
-        if (tables == null) return null;
+
+        if (tables == null) {
+          return null;
+        }
+
         return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
       }
     }
@@ -392,12 +400,10 @@ public final class BackupCommands {
       helpFormatter.setWidth(100);
       helpFormatter.setSyntaxPrefix("Options:");
       helpFormatter.printHelp(" ", null, options, USAGE_FOOTER);
-
     }
   }
 
   private static class HelpCommand extends Command {
-
     HelpCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -449,7 +455,6 @@ public final class BackupCommands {
   }
 
   private static class DescribeCommand extends Command {
-
     DescribeCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -488,7 +493,6 @@ public final class BackupCommands {
   }
 
   private static class ProgressCommand extends Command {
-
     ProgressCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -522,7 +526,6 @@ public final class BackupCommands {
             info = infos.get(0);
             backupId = info.getBackupId();
             System.out.println("Found ongoing session with backupId=" + backupId);
-          } else {
           }
         }
         int progress = info == null ? -1 : info.getProgress();
@@ -545,7 +548,6 @@ public final class BackupCommands {
   }
 
   private static class DeleteCommand extends Command {
-
     DeleteCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -572,11 +574,10 @@ public final class BackupCommands {
         int deleted = admin.deleteBackups(backupIds);
         System.out.println("Deleted " + deleted + " backups. Total requested: " + (args.length -1));
       } catch (IOException e) {
-        System.err
-            .println("Delete command FAILED. Please run backup repair tool to restore backup system integrity");
+        System.err.println("Delete command FAILED. Please run backup repair tool to restore backup "
+                + "system integrity");
         throw e;
       }
-
     }
 
     @Override
@@ -586,7 +587,6 @@ public final class BackupCommands {
   }
 
   private static class RepairCommand extends Command {
-
     RepairCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -637,7 +637,6 @@ public final class BackupCommands {
         sysTable.updateBackupInfo(backupInfo);
         sysTable.finishBackupExclusiveOperation();
         System.out.println("REPAIR status: finished repair failed session:\n " + backupInfo);
-
       }
     }
 
@@ -660,7 +659,6 @@ public final class BackupCommands {
         admin.deleteBackups(backupIds);
       }
       System.out.println("DELETE operation finished OK: " + StringUtils.join(backupIds));
-
     }
 
     private void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTable sysTable)
@@ -684,7 +682,6 @@ public final class BackupCommands {
         admin.mergeBackups(backupIds);
       }
       System.out.println("MERGE operation finished OK: " + StringUtils.join(backupIds));
-
     }
 
     @Override
@@ -694,7 +691,6 @@ public final class BackupCommands {
   }
 
   private static class MergeCommand extends Command {
-
     MergeCommand(Configuration conf, CommandLine cmdline) {
       super(conf);
       this.cmdline = cmdline;
@@ -744,7 +740,6 @@ public final class BackupCommands {
   }
 
   private static class HistoryCommand extends Command {
-
     private final static int DEFAULT_HISTORY_LENGTH = 10;
 
     HistoryCommand(Configuration conf, CommandLine cmdline) {
@@ -754,14 +749,16 @@ public final class BackupCommands {
 
     @Override
     public void execute() throws IOException {
-
       int n = parseHistoryLength();
       final TableName tableName = getTableName();
       final String setName = getTableSetName();
       BackupInfo.Filter tableNameFilter = new BackupInfo.Filter() {
         @Override
         public boolean apply(BackupInfo info) {
-          if (tableName == null) return true;
+          if (tableName == null) {
+            return true;
+          }
+
           List<TableName> names = info.getTableNames();
           return names.contains(tableName);
         }
@@ -769,13 +766,16 @@ public final class BackupCommands {
       BackupInfo.Filter tableSetFilter = new BackupInfo.Filter() {
         @Override
         public boolean apply(BackupInfo info) {
-          if (setName == null) return true;
+          if (setName == null) {
+            return true;
+          }
+
           String backupId = info.getBackupId();
           return backupId.startsWith(setName);
         }
       };
       Path backupRootPath = getBackupRootPath();
-      List<BackupInfo> history = null;
+      List<BackupInfo> history;
       if (backupRootPath == null) {
         // Load from backup system table
         super.execute();
@@ -796,7 +796,11 @@ public final class BackupCommands {
       String value = null;
       try {
         value = cmdline.getOptionValue(OPTION_PATH);
-        if (value == null) return null;
+
+        if (value == null) {
+          return null;
+        }
+
         return new Path(value);
       } catch (IllegalArgumentException e) {
         System.out.println("ERROR: Illegal argument for backup root path: " + value);
@@ -807,7 +811,11 @@ public final class BackupCommands {
 
     private TableName getTableName() throws IOException {
       String value = cmdline.getOptionValue(OPTION_TABLE);
-      if (value == null) return null;
+
+      if (value == null) {
+        return null;
+      }
+
       try {
         return TableName.valueOf(value);
       } catch (IllegalArgumentException e) {
@@ -817,15 +825,17 @@ public final class BackupCommands {
       }
     }
 
-    private String getTableSetName() throws IOException {
-      String value = cmdline.getOptionValue(OPTION_SET);
-      return value;
+    private String getTableSetName() {
+      return cmdline.getOptionValue(OPTION_SET);
     }
 
     private int parseHistoryLength() throws IOException {
       String value = cmdline.getOptionValue(OPTION_RECORD_NUMBER);
       try {
-        if (value == null) return DEFAULT_HISTORY_LENGTH;
+        if (value == null) {
+          return DEFAULT_HISTORY_LENGTH;
+        }
+
         return Integer.parseInt(value);
       } catch (NumberFormatException e) {
         System.out.println("Illegal argument for history length: " + value);
@@ -877,24 +887,23 @@ public final class BackupCommands {
       BackupCommand cmd = getCommand(cmdStr);
 
       switch (cmd) {
-      case SET_ADD:
-        processSetAdd(args);
-        break;
-      case SET_REMOVE:
-        processSetRemove(args);
-        break;
-      case SET_DELETE:
-        processSetDelete(args);
-        break;
-      case SET_DESCRIBE:
-        processSetDescribe(args);
-        break;
-      case SET_LIST:
-        processSetList(args);
-        break;
-      default:
-        break;
-
+        case SET_ADD:
+          processSetAdd(args);
+          break;
+        case SET_REMOVE:
+          processSetRemove(args);
+          break;
+        case SET_DELETE:
+          processSetDelete(args);
+          break;
+        case SET_DESCRIBE:
+          processSetDescribe(args);
+          break;
+        case SET_LIST:
+          processSetList(args);
+          break;
+        default:
+          break;
       }
     }
 
@@ -991,20 +1000,21 @@ public final class BackupCommands {
     }
 
     private BackupCommand getCommand(String cmdStr) throws IOException {
-      if (cmdStr.equals(SET_ADD_CMD)) {
-        return BackupCommand.SET_ADD;
-      } else if (cmdStr.equals(SET_REMOVE_CMD)) {
-        return BackupCommand.SET_REMOVE;
-      } else if (cmdStr.equals(SET_DELETE_CMD)) {
-        return BackupCommand.SET_DELETE;
-      } else if (cmdStr.equals(SET_DESCRIBE_CMD)) {
-        return BackupCommand.SET_DESCRIBE;
-      } else if (cmdStr.equals(SET_LIST_CMD)) {
-        return BackupCommand.SET_LIST;
-      } else {
-        System.out.println("ERROR: Unknown command for 'set' :" + cmdStr);
-        printUsage();
-        throw new IOException(INCORRECT_USAGE);
+      switch (cmdStr) {
+        case SET_ADD_CMD:
+          return BackupCommand.SET_ADD;
+        case SET_REMOVE_CMD:
+          return BackupCommand.SET_REMOVE;
+        case SET_DELETE_CMD:
+          return BackupCommand.SET_DELETE;
+        case SET_DESCRIBE_CMD:
+          return BackupCommand.SET_DESCRIBE;
+        case SET_LIST_CMD:
+          return BackupCommand.SET_LIST;
+        default:
+          System.out.println("ERROR: Unknown command for 'set' :" + cmdStr);
+          printUsage();
+          throw new IOException(INCORRECT_USAGE);
       }
     }
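
The rewrite above turns an if/else-if chain over command strings into a switch; switching on String values has been legal since Java 7, and it centralizes the unknown-command fallback in the default arm. A compact sketch with hypothetical command names:

public class StringSwitchSketch {
  static String dispatch(String cmd) {
    switch (cmd) {
      case "add":
        return "SET_ADD";
      case "remove":
        return "SET_REMOVE";
      default:
        // Unknown commands fail in exactly one place
        throw new IllegalArgumentException("Unknown command for 'set': " + cmd);
    }
  }

  public static void main(String[] args) {
    System.out.println(dispatch("add")); // SET_ADD
  }
}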
 
@@ -1012,6 +1022,5 @@ public final class BackupCommands {
     protected void printUsage() {
       System.out.println(SET_CMD_USAGE);
     }
-
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index 4ca998c..f09d6d0 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -81,7 +81,6 @@ public class BackupManager implements Closeable {
     this.conf = conf;
     this.conn = conn;
     this.systemTable = new BackupSystemTable(conn);
-
   }
 
   /**
@@ -113,14 +112,14 @@ public class BackupManager implements Closeable {
     if (classes == null) {
       conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, masterProcedureClass);
     } else if (!classes.contains(masterProcedureClass)) {
-      conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + "," + masterProcedureClass);
+      conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + ","
+              + masterProcedureClass);
     }
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Added log cleaner: " + cleanerClass + "\n" + "Added master procedure manager: "
           + masterProcedureClass);
     }
-
   }
 
   /**
@@ -170,7 +169,6 @@ public class BackupManager implements Closeable {
    */
   @Override
   public void close() {
-
     if (systemTable != null) {
       try {
         systemTable.close();
@@ -200,7 +198,6 @@ public class BackupManager implements Closeable {
     if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
       // If the table list is null for a full backup, it means back up all tables; fill the table
       // list with all user tables from meta. If no table is available, throw the request exception.
-
       HTableDescriptor[] htds = null;
       try (Admin admin = conn.getAdmin()) {
         htds = admin.listTables();
@@ -242,7 +239,6 @@ public class BackupManager implements Closeable {
    * @throws IOException exception
    */
   private String getOngoingBackupId() throws IOException {
-
     ArrayList<BackupInfo> sessions = systemTable.getBackupInfos(BackupState.RUNNING);
     if (sessions.size() == 0) {
       return null;
@@ -272,13 +268,11 @@ public class BackupManager implements Closeable {
    * @param backupInfo The backup info for the current backup
    * @return The ancestors for the current backup
    * @throws IOException exception
-   * @throws BackupException exception
    */
-  public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo) throws IOException,
-      BackupException {
+  public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo) throws IOException {
     LOG.debug("Getting the direct ancestors of the current backup " + backupInfo.getBackupId());
 
-    ArrayList<BackupImage> ancestors = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> ancestors = new ArrayList<>();
 
     // full backup does not have ancestor
     if (backupInfo.getType() == BackupType.FULL) {
@@ -287,7 +281,6 @@ public class BackupManager implements Closeable {
     }
 
     // get all backup history list in descending order
-
     ArrayList<BackupInfo> allHistoryList = getBackupHistory(true);
     for (BackupInfo backup : allHistoryList) {
 
@@ -327,7 +320,8 @@ public class BackupManager implements Closeable {
           ancestors.add(lastIncrImage);
 
           LOG.debug("Last dependent incremental backup image: " + "{BackupID="
-              + lastIncrImage.getBackupId() + "," + "BackupDir=" + lastIncrImage.getRootDir() + "}");
+                  + lastIncrImage.getBackupId() + "," + "BackupDir=" + lastIncrImage.getRootDir()
+                  + "}");
         }
       }
     }
@@ -340,13 +334,12 @@ public class BackupManager implements Closeable {
    * @param backupInfo backup info
    * @param table table
    * @return backupImages on the dependency list
-   * @throws BackupException exception
    * @throws IOException exception
    */
   public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
-      throws BackupException, IOException {
+      throws IOException {
     ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
-    ArrayList<BackupImage> tableAncestors = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> tableAncestors = new ArrayList<>();
     for (BackupImage image : ancestors) {
       if (image.hasTable(table)) {
         tableAncestors.add(image);
@@ -489,9 +482,10 @@ public class BackupManager implements Closeable {
   }
 
   /**
-   * Get WAL files iterator
+   * Get WAL files iterator.
+   *
    * @return WAL files iterator from backup system table
-   * @throws IOException
+   * @throws IOException if getting the WAL files iterator fails
    */
   public Iterator<BackupSystemTable.WALItem> getWALFilesFromBackupSystem() throws IOException {
     return systemTable.getWALFilesIterator(backupInfo.getBackupRootDir());
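
Two cleanups recur throughout BackupManager: the diamond operator lets the compiler infer generic type arguments, and the duplicate throws BackupException on getAncestors is dropped, which is safe assuming BackupException remains an IOException subtype (as the patch implies by treating the clause as redundant). Below is a small self-contained sketch under those assumptions; SketchBackupException and the method body are hypothetical stand-ins, not the real HBase types.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.HashMap;

    public class ThrowsAndDiamondSketch {
      // Hypothetical stand-in for BackupException: an IOException subtype.
      static class SketchBackupException extends IOException {
        SketchBackupException(String msg) { super(msg); }
      }

      // Declaring only IOException is sufficient: the subtype may still be
      // thrown at runtime, and existing catch (IOException e) blocks still work.
      static ArrayList<String> getAncestors(boolean failing) throws IOException {
        if (failing) {
          throw new SketchBackupException("backup history is inconsistent");
        }
        // Diamond operator: the type arguments on the right are inferred,
        // replacing spelled-out forms like new HashMap<TableName, HashMap<String, Long>>().
        HashMap<String, HashMap<String, Long>> timestamps = new HashMap<>();
        timestamps.computeIfAbsent("rs1", k -> new HashMap<>()).put("table1", 42L);

        ArrayList<String> ancestors = new ArrayList<>();
        ancestors.add(timestamps.toString());
        return ancestors;
      }

      public static void main(String[] args) throws IOException {
        System.out.println(getAncestors(false)); // prints [{rs1={table1=42}}]
      }
    }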

http://git-wip-us.apache.org/repos/asf/hbase/blob/c2236b77/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
index c579bca..4c38219 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
  */
 @InterfaceAudience.Private
 public class BackupManifest {
-
   private static final Logger LOG = LoggerFactory.getLogger(BackupManifest.class);
 
   // manifest file name
@@ -66,9 +65,7 @@ public class BackupManifest {
   * Backup image: the dependency graph is made up of a series of backup images. A BackupImage
   * contains all the relevant information needed to restore the backup and is used during restore.
    */
-
   public static class BackupImage implements Comparable<BackupImage> {
-
     static class Builder {
       BackupImage image;
 
@@ -145,7 +142,7 @@ public class BackupManifest {
       long startTs = im.getStartTs();
       long completeTs = im.getCompleteTs();
       List<HBaseProtos.TableName> tableListList = im.getTableListList();
-      List<TableName> tableList = new ArrayList<TableName>();
+      List<TableName> tableList = new ArrayList<>();
       for (HBaseProtos.TableName tn : tableListList) {
         tableList.add(ProtobufUtil.toTableName(tn));
       }
@@ -194,14 +191,17 @@ public class BackupManifest {
         BackupProtos.BackupImage proto) {
       List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList();
 
-      HashMap<TableName, HashMap<String, Long>> incrTimeRanges =
-          new HashMap<TableName, HashMap<String, Long>>();
-      if (list == null || list.size() == 0) return incrTimeRanges;
+      HashMap<TableName, HashMap<String, Long>> incrTimeRanges = new HashMap<>();
+
+      if (list == null || list.size() == 0) {
+        return incrTimeRanges;
+      }
+
       for (BackupProtos.TableServerTimestamp tst : list) {
         TableName tn = ProtobufUtil.toTableName(tst.getTableName());
         HashMap<String, Long> map = incrTimeRanges.get(tn);
         if (map == null) {
-          map = new HashMap<String, Long>();
+          map = new HashMap<>();
           incrTimeRanges.put(tn, map);
         }
         List<BackupProtos.ServerTimestamp> listSt = tst.getServerTimestampList();
@@ -290,13 +290,13 @@ public class BackupManifest {
 
     public ArrayList<BackupImage> getAncestors() {
       if (this.ancestors == null) {
-        this.ancestors = new ArrayList<BackupImage>();
+        this.ancestors = new ArrayList<>();
       }
       return this.ancestors;
     }
 
     public void removeAncestors(List<String> backupIds) {
-      List<BackupImage> toRemove = new ArrayList<BackupImage>();
+      List<BackupImage> toRemove = new ArrayList<>();
       for (BackupImage im : this.ancestors) {
         if (backupIds.contains(im.getBackupId())) {
           toRemove.add(im);
@@ -377,7 +377,6 @@ public class BackupManifest {
    * @param backup The ongoing backup info
    */
   public BackupManifest(BackupInfo backup) {
-
     BackupImage.Builder builder = BackupImage.newBuilder();
     this.backupImage =
         builder.withBackupId(backup.getBackupId()).withType(backup.getType())
@@ -402,11 +401,11 @@ public class BackupManifest {
 
   /**
    * Construct manifest from a backup directory.
+   *
    * @param conf configuration
    * @param backupPath backup path
-   * @throws IOException
+   * @throws IOException if constructing the manifest from the backup directory fails
    */
-
   public BackupManifest(Configuration conf, Path backupPath) throws IOException {
     this(backupPath.getFileSystem(conf), backupPath);
   }
@@ -417,7 +416,6 @@ public class BackupManifest {
    * @param backupPath backup path
    * @throws BackupException exception
    */
-
   public BackupManifest(FileSystem fs, Path backupPath) throws BackupException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Loading manifest from: " + backupPath.toString());
@@ -427,7 +425,6 @@ public class BackupManifest {
     // This variable's purpose is to keep the correct and original location so
     // that we can store/persist it.
     try {
-
       FileStatus[] subFiles = BackupUtils.listStatus(fs, backupPath, null);
       if (subFiles == null) {
         String errorMsg = backupPath.toString() + " does not exist";
@@ -436,7 +433,6 @@ public class BackupManifest {
       }
       for (FileStatus subFile : subFiles) {
         if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) {
-
           // load and set manifest field from file content
           FSDataInputStream in = fs.open(subFile.getPath());
           long len = subFile.getLen();
@@ -456,7 +452,6 @@ public class BackupManifest {
       }
       String errorMsg = "No manifest file found in: " + backupPath.toString();
       throw new IOException(errorMsg);
-
     } catch (IOException e) {
       throw new BackupException(e.getMessage());
     }
@@ -478,7 +473,6 @@ public class BackupManifest {
    * TODO: fix it. Persist the manifest file.
    * @throws IOException IOException when storing the manifest file.
    */
-
   public void store(Configuration conf) throws BackupException {
     byte[] data = backupImage.toProto().toByteArray();
     // write the file, overwrite if already exist
@@ -529,12 +523,12 @@ public class BackupManifest {
    * @return the backup image list for restore in time order
    */
   public ArrayList<BackupImage> getRestoreDependentList(boolean reverse) {
-    TreeMap<Long, BackupImage> restoreImages = new TreeMap<Long, BackupImage>();
+    TreeMap<Long, BackupImage> restoreImages = new TreeMap<>();
     restoreImages.put(backupImage.startTs, backupImage);
     for (BackupImage image : backupImage.getAncestors()) {
       restoreImages.put(Long.valueOf(image.startTs), image);
     }
-    return new ArrayList<BackupImage>(reverse ? (restoreImages.descendingMap().values())
+    return new ArrayList<>(reverse ? (restoreImages.descendingMap().values())
         : (restoreImages.values()));
   }
 
@@ -545,7 +539,7 @@ public class BackupManifest {
    * @return the backup image list for a table in time order
    */
   public ArrayList<BackupImage> getDependentListByTable(TableName table) {
-    ArrayList<BackupImage> tableImageList = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> tableImageList = new ArrayList<>();
     ArrayList<BackupImage> imageList = getRestoreDependentList(true);
     for (BackupImage image : imageList) {
       if (image.hasTable(table)) {
@@ -567,7 +561,7 @@ public class BackupManifest {
    *         dependency of this image
    */
   public ArrayList<BackupImage> getAllDependentListByTable(TableName table) {
-    ArrayList<BackupImage> tableImageList = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> tableImageList = new ArrayList<>();
     ArrayList<BackupImage> imageList = getRestoreDependentList(false);
     for (BackupImage image : imageList) {
       if (image.hasTable(table)) {
@@ -596,7 +590,7 @@ public class BackupManifest {
     }
     List<TableName> image1TableList = image1.getTableNames();
     List<TableName> image2TableList = image2.getTableNames();
-    boolean found = false;
+    boolean found;
     for (int i = 0; i < image2TableList.size(); i++) {
       found = false;
       for (int j = 0; j < image1TableList.size(); j++) {
@@ -634,14 +628,14 @@ public class BackupManifest {
       }
     }
 
-    ArrayList<String> image1TableList = new ArrayList<String>();
+    ArrayList<String> image1TableList = new ArrayList<>();
     for (BackupImage image1 : fullImages) {
       List<TableName> tableList = image1.getTableNames();
       for (TableName table : tableList) {
         image1TableList.add(table.getNameAsString());
       }
     }
-    ArrayList<String> image2TableList = new ArrayList<String>();
+    ArrayList<String> image2TableList = new ArrayList<>();
     List<TableName> tableList = image.getTableNames();
     for (TableName table : tableList) {
       image2TableList.add(table.getNameAsString());