Posted to commits@hbase.apache.org by te...@apache.org on 2016/12/12 18:33:02 UTC

hbase git commit: HBASE-16940 Address review of "Backup/Restore (HBASE-7912, HBASE-14030, HBASE-14123) mega patch" posted on RB - fix findbugs warnings (Vladimir Rodionov)

Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 3d967f96d -> 8d62d9a0d


HBASE-16940 Address review of "Backup/Restore (HBASE-7912, HBASE-14030, HBASE-14123) mega patch" posted on RB - fix findbugs warnings (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d62d9a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d62d9a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d62d9a0

Branch: refs/heads/HBASE-7912
Commit: 8d62d9a0d567ec9bb94adfc9c4505c0bf33e6627
Parents: 3d967f9
Author: tedyu <yu...@gmail.com>
Authored: Mon Dec 12 10:32:41 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Dec 12 10:32:41 2016 -0800

----------------------------------------------------------------------
 .../hbase/backup/impl/BackupAdminImpl.java      |  2 +-
 .../backup/impl/IncrementalBackupManager.java   | 56 ++++++++++----------
 .../mapreduce/MapReduceBackupCopyTask.java      |  3 +-
 3 files changed, 31 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d62d9a0/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index 0e094d5..b73e576 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -54,7 +54,7 @@ import com.google.common.collect.Lists;
 
 /**
  * The administrative API implementation for HBase Backup. Create an instance from
- * {@link BackupAdminImpl(Connection)} and call {@link #close()} afterwards.
+ * {@link #BackupAdminImpl(Connection)} and call {@link #close()} afterwards.
  * <p>BackupAdmin can be used to create backups, restore data from backups and for
  * other backup-related operations.
  *
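
For context, a minimal usage sketch of the pattern the corrected Javadoc link points at; the
configuration setup and the placeholder backup/restore calls are illustrative assumptions and
are not part of this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Create an instance from BackupAdminImpl(Connection) and call close()
    // afterwards, as the fixed Javadoc text describes.
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      BackupAdminImpl admin = new BackupAdminImpl(conn);
      try {
        // backup, restore and other backup-related operations would go here;
        // they are outside the scope of this findbugs cleanup
      } finally {
        admin.close();
      }
    }
  }
}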

http://git-wip-us.apache.org/repos/asf/hbase/blob/8d62d9a0/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index ab5c0c1..8af1044 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -263,7 +263,7 @@ public class IncrementalBackupManager extends BackupManager{
         currentLogTS = BackupClientUtil.getCreationTime(log.getPath());
         // newestTimestamps is up-to-date with the current list of hosts
         // so newestTimestamps.get(host) will not be null.
-        if (Long.valueOf(currentLogTS) > Long.valueOf(newestTimestamps.get(host))) {
+        if (currentLogTS > newestTimestamps.get(host)) {
           newestLogs.add(currentLogFile);
         }
       }
@@ -293,13 +293,13 @@ public class IncrementalBackupManager extends BackupManager{
        * last backup.
        */
       if (oldTimeStamp == null) {
-        if (Long.valueOf(currentLogTS) < Long.valueOf(savedStartCode)) {
+        if (currentLogTS < Long.valueOf(savedStartCode)) {
           // This log file is really old, its region server was before our last backup.
           continue;
         } else {
           resultLogFiles.add(currentLogFile);
         }
-      } else if (Long.valueOf(currentLogTS) > Long.valueOf(oldTimeStamp)) {
+      } else if (currentLogTS > oldTimeStamp) {
         resultLogFiles.add(currentLogFile);
       }
 
@@ -308,7 +308,7 @@ public class IncrementalBackupManager extends BackupManager{
       // Even if these logs belong to an obsolete region server, we still need
       // to include them to avoid loss of edits for backup.
       Long newTimestamp = newestTimestamps.get(host);
-      if (newTimestamp != null && Long.valueOf(currentLogTS) > Long.valueOf(newTimestamp)) {
+      if (newTimestamp != null && currentLogTS > newTimestamp) {
         newestLogs.add(currentLogFile);
       }
     }
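
The comparison hunks above all remove the same redundant-boxing pattern that findbugs flags. A
standalone sketch of the before/after (the map contents and names below are invented for
illustration, not taken from the patch):

import java.util.HashMap;
import java.util.Map;

public class BoxedCompareSketch {
  public static void main(String[] args) {
    Map<String, Long> newestTimestamps = new HashMap<>();
    newestTimestamps.put("rs-host-1", 1481567561000L);
    long currentLogTS = 1481567562000L;

    // Before: both operands were wrapped in Long.valueOf(...) only for the
    // relational operator to unbox them again, so the wrapper objects are
    // pure overhead -- that is what the findbugs warning points at.
    boolean before = Long.valueOf(currentLogTS) > Long.valueOf(newestTimestamps.get("rs-host-1"));

    // After: compare directly; the Long taken from the map is unboxed once.
    boolean after = currentLogTS > newestTimestamps.get("rs-host-1");

    System.out.println(before + " " + after);  // prints: true true
  }
}

Note that the remaining Long.valueOf(savedStartCode) call in the second hunk is kept, presumably
because savedStartCode is not already a numeric value at that point.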
@@ -317,34 +317,34 @@ public class IncrementalBackupManager extends BackupManager{
     return resultLogFiles;
   }
 
-  class NewestLogFilter implements PathFilter {
-    private Long lastBackupTS = 0L;
 
-    public NewestLogFilter() {
-    }
+static class NewestLogFilter implements PathFilter {
+  private Long lastBackupTS = 0L;
 
-    protected void setLastBackupTS(Long ts) {
-      this.lastBackupTS = ts;
-    }
+  public NewestLogFilter() {
+  }
 
-    @Override
-    public boolean accept(Path path) {
-      // skip meta table log -- ts.meta file
-      if (DefaultWALProvider.isMetaFile(path)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skip .meta log file: " + path.getName());
-        }
-        return false;
-      }
-      Long timestamp = null;
-      try {
-        timestamp = BackupClientUtil.getCreationTime(path);
-        return timestamp > Long.valueOf(lastBackupTS);
-      } catch (Exception e) {
-        LOG.warn("Cannot read timestamp of log file " + path);
-        return false;
+  protected void setLastBackupTS(Long ts) {
+    this.lastBackupTS = ts;
+  }
+
+  @Override
+  public boolean accept(Path path) {
+    // skip meta table log -- ts.meta file
+    if (DefaultWALProvider.isMetaFile(path)) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Skip .meta log file: " + path.getName());
       }
+      return false;
+    }
+    Long timestamp = null;
+    try {
+      timestamp = BackupClientUtil.getCreationTime(path);
+      return timestamp > lastBackupTS;
+    } catch (Exception e) {
+      LOG.warn("Cannot read timestamp of log file " + path);
+      return false;
     }
   }
-
+}
 }
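
The last hunk above turns NewestLogFilter into a static nested class. A toy sketch of what that
change buys (the class names here are invented; findbugs' usual complaint in this situation is
that a non-static inner class silently pins its enclosing instance):

public class Outer {
  // A non-static inner class carries a hidden reference to its enclosing Outer
  // instance even when it never uses it, keeping the outer object reachable
  // for as long as the inner one is -- the shape NewestLogFilter had before.
  class Inner { }

  // A static nested class has no such reference and can be created without an
  // Outer instance -- the shape NewestLogFilter has after this patch.
  static class Nested { }

  public static void main(String[] args) {
    Nested n = new Nested();               // no enclosing instance needed
    Inner i = new Outer().new Inner();     // requires an Outer instance
    System.out.println(n.getClass() + " / " + i.getClass());
  }
}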

http://git-wip-us.apache.org/repos/asf/hbase/blob/8d62d9a0/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
index 8d404d9..60bd065 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyTask.java
@@ -211,7 +211,8 @@ public class MapReduceBackupCopyTask implements BackupCopyTask {
 
         // Update the copy progress to ZK every 0.5s if progress value changed
         int progressReportFreq =
-            this.getConf().getInt("hbase.backup.progressreport.frequency", 500);
+            MapReduceBackupCopyTask.this.getConf().
+              getInt("hbase.backup.progressreport.frequency", 500);
         float lastProgress = progressDone;
         while (!job.isComplete()) {
           float newProgress =