Posted to commits@hbase.apache.org by sy...@apache.org on 2017/03/31 22:54:31 UTC

[1/9] hbase git commit: HBASE-14417 Incremental backup and bulk loading

Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 cb4fac1d1 -> 1c4d9c896


HBASE-14417 Incremental backup and bulk loading


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0345fc87
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0345fc87
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0345fc87

Branch: refs/heads/hbase-12439
Commit: 0345fc87759a7d44ecc385327ebb586fc632fb65
Parents: cb4fac1
Author: tedyu <yu...@gmail.com>
Authored: Tue Mar 28 16:23:36 2017 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue Mar 28 16:23:36 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/backup/BackupHFileCleaner.java | 180 +++++++++
 .../hadoop/hbase/backup/BackupObserver.java     | 102 +++++
 .../hbase/backup/impl/BackupAdminImpl.java      |  28 +-
 .../hadoop/hbase/backup/impl/BackupManager.java |  16 +
 .../hbase/backup/impl/BackupSystemTable.java    | 393 ++++++++++++++++++-
 .../impl/IncrementalTableBackupClient.java      | 124 ++++++
 .../hbase/backup/impl/RestoreTablesClient.java  |  96 +++--
 .../backup/mapreduce/MapReduceRestoreJob.java   |   6 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |  20 +-
 .../hadoop/hbase/util/HFileArchiveUtil.java     |  16 +
 .../hadoop/hbase/backup/TestBackupBase.java     |  15 +
 .../hbase/backup/TestBackupHFileCleaner.java    | 141 +++++++
 .../TestIncrementalBackupWithBulkLoad.java      | 145 +++++++
 .../mapreduce/TestLoadIncrementalHFiles.java    |  46 ++-
 14 files changed, 1275 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
new file mode 100644
index 0000000..b6b4c0a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+
+/**
+ * Implementation of a file cleaner that checks whether an hfile is still referenced by a backup
+ * before deleting it from the hfile archive directory.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abortable {
+  private static final Log LOG = LogFactory.getLog(BackupHFileCleaner.class);
+  private boolean stopped = false;
+  private boolean aborted;
+  private Configuration conf;
+  private Connection connection;
+  private long prevReadFromBackupTbl = 0, // timestamp of most recent read from hbase:backup table
+      secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from hbase:backup table
+  //used by unit test to skip reading hbase:backup
+  private boolean checkForFullyBackedUpTables = true;
+  private List<TableName> fullyBackedUpTables = null;
+
+  private Set<String> getFilenameFromBulkLoad(Map<byte[], List<Path>>[] maps) {
+    Set<String> filenames = new HashSet<String>();
+    for (Map<byte[], List<Path>> map : maps) {
+      if (map == null) continue;
+      for (List<Path> paths : map.values()) {
+        for (Path p : paths) {
+          filenames.add(p.getName());
+        }
+      }
+    }
+    return filenames;
+  }
+
+  private Set<String> loadHFileRefs(List<TableName> tableList) throws IOException {
+    if (connection == null) {
+      connection = ConnectionFactory.createConnection(conf);
+    }
+    try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      Map<byte[], List<Path>>[] res =
+          tbl.readBulkLoadedFiles(null, tableList);
+      secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
+      prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
+      return getFilenameFromBulkLoad(res);
+    }
+  }
+
+  @VisibleForTesting
+  void setCheckForFullyBackedUpTables(boolean b) {
+    checkForFullyBackedUpTables = b;
+  }
+
+  @Override
+  public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
+    if (conf == null) {
+      return files;
+    }
+    // obtain the Set of TableName's which have been fully backed up
+    // so that only bulk load records for those tables are considered
+    if (checkForFullyBackedUpTables) {
+      if (connection == null) return files;
+      try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
+        fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
+      } catch (IOException ioe) {
+        LOG.error("Failed to get tables which have been fully backed up, skipping checking", ioe);
+        return Collections.emptyList();
+      }
+      Collections.sort(fullyBackedUpTables);
+    }
+    final Set<String> hfileRefs;
+    try {
+      hfileRefs = loadHFileRefs(fullyBackedUpTables);
+    } catch (IOException ioe) {
+      LOG.error("Failed to read hfile references, skipping checking deletable files", ioe);
+      return Collections.emptyList();
+    }
+    Iterable<FileStatus> deletables = Iterables.filter(files, new Predicate<FileStatus>() {
+      @Override
+      public boolean apply(FileStatus file) {
+        // If the file is recent, be conservative and wait for one more scan of hbase:backup table
+        if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
+          return false;
+        }
+        String hfile = file.getPath().getName();
+        boolean foundHFileRef = hfileRefs.contains(hfile);
+        return !foundHFileRef;
+      }
+    });
+    return deletables;
+  }
+
+  @Override
+  public boolean isFileDeletable(FileStatus fStat) {
+    // work is done in getDeletableFiles()
+    return true;
+  }
+
+  @Override
+  public void setConf(Configuration config) {
+    this.conf = config;
+    this.connection = null;
+    try {
+      this.connection = ConnectionFactory.createConnection(conf);
+    } catch (IOException ioe) {
+      LOG.error("Couldn't establish connection", ioe);
+    }
+  }
+
+  @Override
+  public void stop(String why) {
+    if (this.stopped) {
+      return;
+    }
+    if (this.connection != null) {
+      try {
+        this.connection.close();
+      } catch (IOException ioe) {
+        LOG.debug("Got " + ioe + " when closing connection");
+      }
+    }
+    this.stopped = true;
+  }
+
+  @Override
+  public boolean isStopped() {
+    return this.stopped;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+    LOG.warn("Aborting BackupHFileCleaner because " + why, e);
+    this.aborted = true;
+    stop(why);
+  }
+
+  @Override
+  public boolean isAborted() {
+    return this.aborted;
+  }
+}
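
The cleaner only takes effect once the master loads it as an hfile cleaner delegate. A minimal
sketch of wiring it up through configuration (hbase.master.hfilecleaner.plugins is the standard
plugin key; keeping TimeToLiveHFileCleaner in the chain is an assumption about a typical setup):

    Configuration conf = HBaseConfiguration.create();
    // Append BackupHFileCleaner to the master's chain of archive cleaner delegates.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
        + "org.apache.hadoop.hbase.backup.BackupHFileCleaner");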

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java
new file mode 100644
index 0000000..595e862
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * An Observer to facilitate backup operations
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class BackupObserver implements RegionObserver {
+  private static final Log LOG = LogFactory.getLog(BackupObserver.class);
+  @Override
+  public boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
+    List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths,
+    boolean hasLoaded) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (!hasLoaded) {
+      // there is no need to record state
+      return hasLoaded;
+    }
+    if (finalPaths == null || !BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("skipping recording bulk load in postBulkLoadHFile since backup is disabled or finalPaths is null");
+      return hasLoaded;
+    }
+    try (Connection connection = ConnectionFactory.createConnection(cfg);
+        BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
+      HRegionInfo info = ctx.getEnvironment().getRegionInfo();
+      TableName tableName = info.getTable();
+      if (!fullyBackedUpTables.contains(tableName)) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(tableName + " has not gone through full backup");
+        }
+        return hasLoaded;
+      }
+      tbl.writePathsPostBulkLoad(tableName, info.getEncodedNameAsBytes(), finalPaths);
+      return hasLoaded;
+    } catch (IOException ioe) {
+      LOG.error("Failed to get tables which have been fully backed up", ioe);
+      return false;
+    }
+  }
+
+  @Override
+  public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
+      final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
+    Configuration cfg = ctx.getEnvironment().getConfiguration();
+    if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
+      LOG.debug("skipping recording bulk load in preCommitStoreFile since backup is disabled or no paths are given");
+      return;
+    }
+    try (Connection connection = ConnectionFactory.createConnection(cfg);
+        BackupSystemTable tbl = new BackupSystemTable(connection)) {
+      List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
+      HRegionInfo info = ctx.getEnvironment().getRegionInfo();
+      TableName tableName = info.getTable();
+      if (!fullyBackedUpTables.contains(tableName)) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(tableName + " has not gone through full backup");
+        }
+        return;
+      }
+      tbl.writeFilesForBulkLoadPreCommit(tableName, info.getEncodedNameAsBytes(), family, pairs);
+      return;
+    }
+  }
+}
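
BackupObserver is a RegionObserver, so its hooks only fire once it is installed on the region
servers. A minimal sketch (hbase.coprocessor.region.classes is the standard region coprocessor
key; that backup is enabled so BackupManager.isBackupEnabled() passes is assumed):

    Configuration conf = HBaseConfiguration.create();
    // Load BackupObserver on every region so bulk loads are recorded for backup.
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.backup.BackupObserver");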

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index c1d5258..eb60860 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -188,7 +188,33 @@ public class BackupAdminImpl implements BackupAdmin {
           removeTableFromBackupImage(info, tn, sysTable);
         }
       }
-      LOG.debug("Delete backup info " + backupInfo.getBackupId());
+      Map<byte[], String> map = sysTable.readBulkLoadedFiles(backupId);
+      FileSystem fs = FileSystem.get(conn.getConfiguration());
+      boolean succ = true;
+      int numDeleted = 0;
+      for (String f : map.values()) {
+        Path p = new Path(f);
+        try {
+          LOG.debug("Deleting bulk loaded file " + p + " for " + backupInfo.getBackupId());
+          if (!fs.delete(p)) {
+            if (fs.exists(p)) {
+              LOG.warn(f + " was not deleted");
+              succ = false;
+            }
+          } else {
+            numDeleted++;
+          }
+        } catch (IOException ioe) {
+          LOG.warn(f + " was not deleted", ioe);
+          succ = false;
+        }
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted");
+      }
+      if (succ) {
+        sysTable.deleteBulkLoadedFiles(map);
+      }
 
       sysTable.deleteBackupInfo(backupInfo.getBackupId());
       LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
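
The delete path above leans on the bulk load bookkeeping added to BackupSystemTable further down.
A sketch of inspecting that bookkeeping for one backup, using only the public API introduced in
this patch (the backup id and table name are illustrative):

    try (BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      List<TableName> tables = Arrays.asList(TableName.valueOf("ns1:test"));
      // One map per requested table: family -> paths of hfile copies made for this backup
      Map<byte[], List<Path>>[] files =
          sysTable.readBulkLoadedFiles("backup_1490741454255", tables);
    }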

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index 1d27e79..c09ce48 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.procedure.ProcedureManagerHost;
+import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -393,6 +395,20 @@ public class BackupManager implements Closeable {
     return systemTable.readRegionServerLastLogRollResult(backupInfo.getBackupRootDir());
   }
 
+  public Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>>
+  readBulkloadRows(List<TableName> tableList) throws IOException {
+    return systemTable.readBulkloadRows(tableList);
+  }
+
+  public void removeBulkLoadedRows(List<TableName> lst, List<byte[]> rows) throws IOException {
+    systemTable.removeBulkLoadedRows(lst, rows);
+  }
+
+  public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps)
+      throws IOException {
+    systemTable.writeBulkLoadedFiles(sTableList, maps, backupInfo.getBackupId());
+  }
+
   /**
    * Get all completed backup information (in desc order by time)
    * @return history info of BackupCompleteData

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index 6362f8e..1ba8087 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -22,11 +22,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.TreeSet;
 
 import org.apache.commons.lang.StringUtils;
@@ -35,6 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -44,6 +47,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
@@ -59,6 +63,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * This class provides API to access backup system table<br>
@@ -77,6 +82,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  */
 @InterfaceAudience.Private
 public final class BackupSystemTable implements Closeable {
+  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
 
   static class WALItem {
     String backupId;
@@ -108,8 +114,6 @@ public final class BackupSystemTable implements Closeable {
 
   }
 
-  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
-
   private TableName tableName;
   /**
    *  Stores backup sessions (contexts)
@@ -119,6 +123,7 @@ public final class BackupSystemTable implements Closeable {
    * Stores other meta
    */
   final static byte[] META_FAMILY = "meta".getBytes();
+  final static byte[] BULK_LOAD_FAMILY = "bulk".getBytes();
   /**
    *  Connection to HBase cluster, shared among all instances
    */
@@ -130,9 +135,22 @@ public final class BackupSystemTable implements Closeable {
   private final static String INCR_BACKUP_SET = "incrbackupset:";
   private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:";
   private final static String RS_LOG_TS_PREFIX = "rslogts:";
+
+  private final static String BULK_LOAD_PREFIX = "bulk:";
+  private final static byte[] BULK_LOAD_PREFIX_BYTES = BULK_LOAD_PREFIX.getBytes();
+  final static byte[] TBL_COL = Bytes.toBytes("tbl");
+  final static byte[] FAM_COL = Bytes.toBytes("fam");
+  final static byte[] PATH_COL = Bytes.toBytes("path");
+  final static byte[] STATE_COL = Bytes.toBytes("state");
+  // the two states a bulk loaded file can be
+  final static byte[] BL_PREPARE = Bytes.toBytes("R");
+  final static byte[] BL_COMMIT = Bytes.toBytes("D");
+
   private final static String WALS_PREFIX = "wals:";
   private final static String SET_KEY_PREFIX = "backupset:";
 
+  // separator between BULK_LOAD_PREFIX and ordinals
+  protected final static String BLK_LD_DELIM = ":";
   private final static byte[] EMPTY_VALUE = new byte[] {};
 
   // Safe delimiter in a string
@@ -196,6 +214,97 @@ public final class BackupSystemTable implements Closeable {
     }
   }
 
+  /*
+   * @param backupId the backup Id
+   * @return Map of rows to path of bulk loaded hfile
+   */
+  Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
+    Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
+    try (Table table = connection.getTable(tableName);
+        ResultScanner scanner = table.getScanner(scan)) {
+      Result res = null;
+      Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+      while ((res = scanner.next()) != null) {
+        res.advance();
+        byte[] row = CellUtil.cloneRow(res.listCells().get(0));
+        for (Cell cell : res.listCells()) {
+          if (CellComparator.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
+              BackupSystemTable.PATH_COL.length) == 0) {
+            map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
+          }
+        }
+      }
+      return map;
+    }
+  }
+
+  /*
+   * Used during restore
+   * @param backupId the backup Id
+   * @param sTableList List of tables
+   * @return array of Map of family to List of Paths
+   */
+  public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
+      throws IOException {
+    Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
+    Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
+    try (Table table = connection.getTable(tableName);
+        ResultScanner scanner = table.getScanner(scan)) {
+      Result res = null;
+      while ((res = scanner.next()) != null) {
+        res.advance();
+        TableName tbl = null;
+        byte[] fam = null;
+        String path = null;
+        for (Cell cell : res.listCells()) {
+          if (CellComparator.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
+              BackupSystemTable.TBL_COL.length) == 0) {
+            tbl = TableName.valueOf(CellUtil.cloneValue(cell));
+          } else if (CellComparator.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
+              BackupSystemTable.FAM_COL.length) == 0) {
+            fam = CellUtil.cloneValue(cell);
+          } else if (CellComparator.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
+              BackupSystemTable.PATH_COL.length) == 0) {
+            path = Bytes.toString(CellUtil.cloneValue(cell));
+          }
+        }
+        int srcIdx = IncrementalTableBackupClient.getIndex(tbl, sTableList);
+        if (srcIdx == -1) {
+          // the table is not among the requested tables
+          continue;
+        }
+        if (mapForSrc[srcIdx] == null) {
+          mapForSrc[srcIdx] = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+        }
+        List<Path> files;
+        if (!mapForSrc[srcIdx].containsKey(fam)) {
+          files = new ArrayList<Path>();
+          mapForSrc[srcIdx].put(fam, files);
+        } else {
+          files = mapForSrc[srcIdx].get(fam);
+        }
+        files.add(new Path(path));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("found bulk loaded file : " + tbl + " " +  Bytes.toString(fam) + " " + path);
+        }
+      }
+      return mapForSrc;
+    }
+  }
+
+  /*
+   * @param map Map of row keys to path of bulk loaded hfile
+   */
+  void deleteBulkLoadedFiles(Map<byte[], String> map) throws IOException {
+    try (Table table = connection.getTable(tableName)) {
+      List<Delete> dels = new ArrayList<>();
+      for (byte[] row : map.keySet()) {
+        dels.add(new Delete(row).addFamily(BackupSystemTable.META_FAMILY));
+      }
+      table.delete(dels);
+    }
+  }
+
   /**
    * Deletes backup status from backup system table table
    * @param backupId backup id
@@ -213,6 +322,156 @@ public final class BackupSystemTable implements Closeable {
     }
   }
 
+  /*
+   * For postBulkLoadHFile() hook.
+   * @param tabName table name
+   * @param region the region receiving hfile
+   * @param finalPaths family and associated hfiles
+   */
+  public void writePathsPostBulkLoad(TableName tabName, byte[] region,
+      Map<byte[], List<Path>> finalPaths) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("write bulk load descriptor to backup " + tabName + " with " +
+          finalPaths.size() + " entries");
+    }
+    try (Table table = connection.getTable(tableName)) {
+      List<Put> puts = BackupSystemTable.createPutForCommittedBulkload(tabName, region,
+          finalPaths);
+      table.put(puts);
+      LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
+    }
+  }
+
+  /*
+   * For preCommitStoreFile() hook
+   * @param tabName table name
+   * @param region the region receiving hfile
+   * @param family column family
+   * @param pairs list of paths for hfiles
+   */
+  public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region,
+      final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("write bulk load descriptor to backup " + tabName + " with " +
+          pairs.size() + " entries");
+    }
+    try (Table table = connection.getTable(tableName)) {
+      List<Put> puts = BackupSystemTable.createPutForPreparedBulkload(tabName, region,
+          family, pairs);
+      table.put(puts);
+      LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
+    }
+  }
+
+  /*
+   * Removes rows recording bulk loaded hfiles from backup table
+   * @param lst list of table names
+   * @param rows the rows to be deleted
+   */
+  public void removeBulkLoadedRows(List<TableName> lst, List<byte[]> rows) throws IOException {
+    try (Table table = connection.getTable(tableName)) {
+      List<Delete> lstDels = new ArrayList<>();
+      for (byte[] row : rows) {
+        Delete del = new Delete(row);
+        lstDels.add(del);
+        LOG.debug("deleting original bulk load row: " + Bytes.toString(row));
+      }
+      table.delete(lstDels);
+      LOG.debug("deleted " + rows.size() + " original bulkload rows for " + lst.size() + " tables");
+    }
+  }
+
+  /*
+   * Reads the rows from backup table recording bulk loaded hfiles
+   * @param tableList list of table names
+   * @return pair of a map (keyed by table, region and column family) and the list of rows read.
+   *  The Boolean in the map reflects whether the hfile was recorded by the preCommitStoreFile
+   *  hook (true) or by the postBulkLoadHFile hook (false)
+   */
+  public Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>>
+  readBulkloadRows(List<TableName> tableList) throws IOException {
+    Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = new HashMap<>();
+    List<byte[]> rows = new ArrayList<>();
+    for (TableName tTable : tableList) {
+      Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable);
+      Map<String, Map<String, List<Pair<String, Boolean>>>> tblMap = map.get(tTable);
+      try (Table table = connection.getTable(tableName);
+          ResultScanner scanner = table.getScanner(scan)) {
+        Result res = null;
+        while ((res = scanner.next()) != null) {
+          res.advance();
+          String fam = null;
+          String path = null;
+          boolean raw = false;
+          byte[] row = null;
+          String region = null;
+          for (Cell cell : res.listCells()) {
+            row = CellUtil.cloneRow(cell);
+            rows.add(row);
+            String rowStr = Bytes.toString(row);
+            region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr);
+            if (CellComparator.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
+                BackupSystemTable.FAM_COL.length) == 0) {
+              fam = Bytes.toString(CellUtil.cloneValue(cell));
+            } else if (CellComparator.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
+                BackupSystemTable.PATH_COL.length) == 0) {
+              path = Bytes.toString(CellUtil.cloneValue(cell));
+            } else if (CellComparator.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0,
+                BackupSystemTable.STATE_COL.length) == 0) {
+              byte[] state = CellUtil.cloneValue(cell);
+              if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) {
+                raw = true;
+              } else {
+                raw = false;
+              }
+            }
+          }
+          if (map.get(tTable) == null) {
+            map.put(tTable, new HashMap<String, Map<String, List<Pair<String, Boolean>>>>());
+            tblMap = map.get(tTable);
+          }
+          if (tblMap.get(region) == null) {
+            tblMap.put(region, new HashMap<String, List<Pair<String, Boolean>>>());
+          }
+          Map<String, List<Pair<String, Boolean>>> famMap = tblMap.get(region);
+          if (famMap.get(fam) == null) {
+            famMap.put(fam, new ArrayList<Pair<String, Boolean>>());
+          }
+          famMap.get(fam).add(new Pair<>(path, raw));
+          LOG.debug("found orig " + path + " for family " + fam + " of region " + region);
+        }
+      }
+    }
+    return new Pair<>(map, rows);
+  }
+
+  /*
+   * @param sTableList List of tables
+   * @param maps array of Map of family to List of Paths
+   * @param backupId the backup Id
+   */
+  public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps,
+      String backupId) throws IOException {
+    try (Table table = connection.getTable(tableName)) {
+      long ts = EnvironmentEdgeManager.currentTime();
+      int cnt = 0;
+      List<Put> puts = new ArrayList<>();
+      for (int idx = 0; idx < maps.length; idx++) {
+        Map<byte[], List<Path>> map = maps[idx];
+        TableName tn = sTableList.get(idx);
+        if (map == null) continue;
+        for (Map.Entry<byte[], List<Path>> entry: map.entrySet()) {
+          byte[] fam = entry.getKey();
+          List<Path> paths = entry.getValue();
+          for (Path p : paths) {
+            Put put = BackupSystemTable.createPutForBulkLoadedFile(tn, fam, p.toString(),
+                backupId, ts, cnt++);
+            puts.add(put);
+          }
+        }
+      }
+      if (!puts.isEmpty()) {
+        table.put(puts);
+      }
+    }
+  }
+
   /**
    * Reads backup status object (instance of backup info) from backup system table table
    * @param backupId backup id
@@ -399,6 +658,21 @@ public final class BackupSystemTable implements Closeable {
 
   }
 
+  /*
+   * Retrieve TableName's for completed backup of given type
+   * @param type backup type
+   * @return List of table names
+   */
+  public List<TableName> getTablesForBackupType(BackupType type) throws IOException {
+    Set<TableName> names = new HashSet<>();
+    List<BackupInfo> infos = getBackupHistory(true);
+    for (BackupInfo info : infos) {
+      if (info.getType() != type) continue;
+      names.addAll(info.getTableNames());
+    }
+    return new ArrayList<>(names);
+  }
+
   /**
    * Get history for backup destination
    * @param backupRoot backup destination path
@@ -1233,6 +1507,119 @@ public final class BackupSystemTable implements Closeable {
     return s.substring(index + 1);
   }
 
+  /*
+   * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
+   */
+  static List<Put> createPutForCommittedBulkload(TableName table, byte[] region,
+      Map<byte[], List<Path>> finalPaths) {
+    List<Put> puts = new ArrayList<>();
+    for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) {
+      for (Path path : entry.getValue()) {
+        String file = path.toString();
+        int lastSlash = file.lastIndexOf("/");
+        String filename = file.substring(lastSlash+1);
+        Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
+            Bytes.toString(region), BLK_LD_DELIM, filename));
+        put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
+        put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, entry.getKey());
+        put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL,
+            file.getBytes());
+        put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
+        puts.add(put);
+        LOG.debug("writing committed bulk load path " + file + " for " + table + " " +
+            Bytes.toString(region));
+      }
+    }
+    return puts;
+  }
+
+  /*
+   * Creates Put's for bulk load resulting from running LoadIncrementalHFiles
+   */
+  static List<Put> createPutForPreparedBulkload(TableName table, byte[] region,
+      final byte[] family, final List<Pair<Path, Path>> pairs) {
+    List<Put> puts = new ArrayList<>();
+    for (Pair<Path, Path> pair : pairs) {
+      Path path = pair.getSecond();
+      String file = path.toString();
+      int lastSlash = file.lastIndexOf("/");
+      String filename = file.substring(lastSlash+1);
+      Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
+          Bytes.toString(region), BLK_LD_DELIM, filename));
+      put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
+      put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
+      put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL,
+          file.getBytes());
+      put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
+      puts.add(put);
+      LOG.debug("writing prepared bulk load path " + file + " for " + table + " " +
+          Bytes.toString(region));
+    }
+    return puts;
+  }
+
+  public static List<Delete> createDeleteForOrigBulkLoad(List<TableName> lst) {
+    List<Delete> lstDels = new ArrayList<>();
+    for (TableName table : lst) {
+      Delete del = new Delete(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM));
+      del.addFamily(BackupSystemTable.META_FAMILY);
+      lstDels.add(del);
+    }
+    return lstDels;
+  }
+
+  static Scan createScanForOrigBulkLoadedFiles(TableName table) throws IOException {
+    Scan scan = new Scan();
+    byte[] startRow = rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM);
+    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
+    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
+    scan.withStartRow(startRow);
+    scan.withStopRow(stopRow);
+    scan.addFamily(BackupSystemTable.META_FAMILY);
+    scan.setMaxVersions(1);
+    return scan;
+  }
+
+  static String getTableNameFromOrigBulkLoadRow(String rowStr) {
+    String[] parts = rowStr.split(BLK_LD_DELIM);
+    return parts[1];
+  }
+
+  static String getRegionNameFromOrigBulkLoadRow(String rowStr) {
+    // format is bulk : namespace : table : region : file
+    String[] parts = rowStr.split(BLK_LD_DELIM);
+    int idx = 3;
+    if (parts.length == 4) {
+      // the table is in default namespace
+      idx = 2;
+    }
+    LOG.debug("bulk row string " + rowStr + " region " + parts[idx]);
+    return parts[idx];
+  }
+
+  /*
+   * Used to query bulk loaded hfiles which have been copied by incremental backup
+   * @param backupId the backup Id. It can be null when querying for all tables
+   * @return the Scan object
+   */
+  static Scan createScanForBulkLoadedFiles(String backupId) throws IOException {
+    Scan scan = new Scan();
+    byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES :
+      rowkey(BULK_LOAD_PREFIX, backupId+BLK_LD_DELIM);
+    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
+    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
+    scan.setStartRow(startRow);
+    scan.setStopRow(stopRow);
+    scan.addFamily(BackupSystemTable.META_FAMILY);
+    scan.setMaxVersions(1);
+    return scan;
+  }
+
+  static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId,
+      long ts, int idx) {
+    Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId+BLK_LD_DELIM+ts+BLK_LD_DELIM+idx));
+    put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName());
+    put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam);
+    put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, p.getBytes());
+    return put;
+  }
+
   /**
    * Creates put list for list of WAL files
    * @param files list of WAL file paths
@@ -1364,7 +1751,7 @@ public final class BackupSystemTable implements Closeable {
     return Bytes.toString(data).substring(SET_KEY_PREFIX.length());
   }
 
-  private byte[] rowkey(String s, String... other) {
+  private static byte[] rowkey(String s, String... other) {
     StringBuilder sb = new StringBuilder(s);
     for (String ss : other) {
       sb.append(ss);
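
Taken together, the hunks above leave two bulk load row layouts in the backup system table
(reconstructed from createPutForCommittedBulkload(), createPutForPreparedBulkload() and
createPutForBulkLoadedFile(); the concrete values are illustrative):

    bulk:ns1:test:<encoded region>:<hfile>    meta:tbl, meta:fam, meta:path, meta:state = R|D
                                              (written by the BackupObserver hooks at load time)
    bulk:<backupId>:<timestamp>:<counter>     meta:tbl, meta:fam, meta:path
                                              (written once incremental backup copies the hfile)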

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 395ed6d..8f6f264 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -18,15 +18,21 @@
 
 package org.apache.hadoop.hbase.backup.impl;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
@@ -40,6 +46,10 @@ import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * Incremental backup implementation.
@@ -154,6 +164,118 @@ public class IncrementalTableBackupClient extends TableBackupClient {
     return list;
   }
 
+  static int getIndex(TableName tbl, List<TableName> sTableList) {
+    if (sTableList == null) return 0;
+    for (int i = 0; i < sTableList.size(); i++) {
+      if (tbl.equals(sTableList.get(i))) {
+        return i;
+      }
+    }
+    return -1;
+  }
+
+  /*
+   * Reads bulk load records from backup table, iterates through the records and forms the paths
+   * for bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination
+   * @param sTableList list of tables to be backed up
+   * @return map of table to List of files
+   */
+  Map<byte[], List<Path>>[] handleBulkLoad(List<TableName> sTableList) throws IOException {
+    Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList.size()];
+    Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
+        backupManager.readBulkloadRows(sTableList);
+    Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> map = pair.getFirst();
+    FileSystem fs = FileSystem.get(conf);
+    FileSystem tgtFs;
+    try {
+      tgtFs = FileSystem.get(new URI(backupInfo.getBackupRootDir()), conf);
+    } catch (URISyntaxException use) {
+      throw new IOException("Unable to get FileSystem", use);
+    }
+    Path rootdir = FSUtils.getRootDir(conf);
+    Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);
+    for (Map.Entry<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>> tblEntry :
+      map.entrySet()) {
+      TableName srcTable = tblEntry.getKey();
+      int srcIdx = getIndex(srcTable, sTableList);
+      if (srcIdx < 0) {
+        LOG.warn("Couldn't find " + srcTable + " in source table List");
+        continue;
+      }
+      if (mapForSrc[srcIdx] == null) {
+        mapForSrc[srcIdx] = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
+      }
+      Path tblDir = FSUtils.getTableDir(rootdir, srcTable);
+      Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()),
+          srcTable.getQualifierAsString());
+      for (Map.Entry<String,Map<String,List<Pair<String, Boolean>>>> regionEntry :
+        tblEntry.getValue().entrySet()){
+        String regionName = regionEntry.getKey();
+        Path regionDir = new Path(tblDir, regionName);
+        // map from family to List of hfiles
+        for (Map.Entry<String,List<Pair<String, Boolean>>> famEntry :
+          regionEntry.getValue().entrySet()) {
+          String fam = famEntry.getKey();
+          Path famDir = new Path(regionDir, fam);
+          List<Path> files;
+          if (!mapForSrc[srcIdx].containsKey(fam.getBytes())) {
+            files = new ArrayList<Path>();
+            mapForSrc[srcIdx].put(fam.getBytes(), files);
+          } else {
+            files = mapForSrc[srcIdx].get(fam.getBytes());
+          }
+          Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
+          String tblName = srcTable.getQualifierAsString();
+          Path tgtFam = new Path(new Path(tgtTable, regionName), fam);
+          if (!tgtFs.mkdirs(tgtFam)) {
+            throw new IOException("couldn't create " + tgtFam);
+          }
+          for (Pair<String, Boolean> fileWithState : famEntry.getValue()) {
+            String file = fileWithState.getFirst();
+            boolean raw = fileWithState.getSecond();
+            int idx = file.lastIndexOf("/");
+            String filename = file;
+            if (idx > 0) {
+              filename = file.substring(idx+1);
+            }
+            Path p = new Path(famDir, filename);
+            Path tgt = new Path(tgtFam, filename);
+            Path archive = new Path(archiveDir, filename);
+            if (fs.exists(p)) {
+              if (LOG.isTraceEnabled()) {
+                LOG.trace("found bulk hfile " + file + " in " + famDir + " for " + tblName);
+              }
+              try {
+                if (LOG.isTraceEnabled()) {
+                  LOG.trace("copying " + p + " to " + tgt);
+                }
+                FileUtil.copy(fs, p, tgtFs, tgt, false, conf);
+              } catch (FileNotFoundException e) {
+                LOG.debug("copying archive " + archive + " to " + tgt);
+                try {
+                  FileUtil.copy(fs, archive, tgtFs, tgt, false, conf);
+                } catch (FileNotFoundException fnfe) {
+                  if (!raw) throw fnfe;
+                }
+              }
+            } else {
+              LOG.debug("copying archive " + archive + " to " + tgt);
+              try {
+                FileUtil.copy(fs, archive, tgtFs, tgt, false, conf);
+              } catch (FileNotFoundException fnfe) {
+                if (!raw) throw fnfe;
+              }
+            }
+            files.add(tgt);
+          }
+        }
+      }
+    }
+    backupManager.writeBulkLoadedFiles(sTableList, mapForSrc);
+    backupManager.removeBulkLoadedRows(sTableList, pair.getSecond());
+    return mapForSrc;
+  }
+
   @Override
   public void execute() throws IOException {
 
@@ -204,6 +326,8 @@ public class IncrementalTableBackupClient extends TableBackupClient {
           BackupUtils.getMinValue(BackupUtils
               .getRSLogTimestampMins(newTableSetTimestampMap));
       backupManager.writeBackupStartCode(newStartCode);
+
+      handleBulkLoad(backupInfo.getTableNames());
       // backup complete
       completeBackup(conn, backupInfo, backupManager, BackupType.INCREMENTAL, conf);
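
handleBulkLoad() returns one map per source table, keyed by column family. A sketch of consuming
the result from within the backup.impl package (the method is package-private; client, sTableList
and LOG are assumed to exist in the caller's scope):

    Map<byte[], List<Path>>[] perTable = client.handleBulkLoad(sTableList);
    for (int i = 0; i < perTable.length; i++) {
      if (perTable[i] == null) continue; // no bulk loaded hfiles for this table
      for (Map.Entry<byte[], List<Path>> e : perTable[i].entrySet()) {
        LOG.debug(sTableList.get(i) + "/" + Bytes.toString(e.getKey()) + " -> " + e.getValue());
      }
    }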
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index f418305..2e4ecce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -19,9 +19,14 @@
 package org.apache.hadoop.hbase.backup.impl;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.commons.lang.StringUtils;
@@ -34,10 +39,13 @@ import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreJob;
 import org.apache.hadoop.hbase.backup.util.RestoreTool;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
 
 /**
  * Restore table implementation
@@ -50,6 +58,7 @@ public class RestoreTablesClient {
   private Configuration conf;
   private Connection conn;
   private String backupId;
+  private String fullBackupId;
   private TableName[] sTableArray;
   private TableName[] tTableArray;
   private String targetRootDir;
@@ -141,6 +150,7 @@ public class RestoreTablesClient {
     // We need hFS only for full restore (see the code)
     BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId);
     if (manifest.getType() == BackupType.FULL) {
+      fullBackupId = manifest.getBackupImage().getBackupId();
       LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image "
           + tableBackupPath.toString());
       restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists,
@@ -170,7 +180,6 @@ public class RestoreTablesClient {
     restoreTool.incrementalRestoreTable(conn, tableBackupPath, paths, new TableName[] { sTable },
       new TableName[] { tTable }, lastIncrBackupId);
     LOG.info(sTable + " has been successfully restored to " + tTable);
-
   }
 
   /**
@@ -185,39 +194,74 @@ public class RestoreTablesClient {
       TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
     TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
     boolean truncateIfExists = isOverwrite;
-    try {
-      for (int i = 0; i < sTableArray.length; i++) {
-        TableName table = sTableArray[i];
-        BackupManifest manifest = backupManifestMap.get(table);
-        // Get the image list of this backup for restore in time order from old
-        // to new.
-        List<BackupImage> list = new ArrayList<BackupImage>();
-        list.add(manifest.getBackupImage());
-        TreeSet<BackupImage> set = new TreeSet<BackupImage>(list);
-        List<BackupImage> depList = manifest.getDependentListByTable(table);
-        set.addAll(depList);
-        BackupImage[] arr = new BackupImage[set.size()];
-        set.toArray(arr);
-        restoreImages(arr, table, tTableArray[i], truncateIfExists);
-        restoreImageSet.addAll(list);
-        if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
-          LOG.info("Restore includes the following image(s):");
-          for (BackupImage image : restoreImageSet) {
-            LOG.info("Backup: "
-                + image.getBackupId()
-                + " "
-                + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
+    Set<String> backupIdSet = new HashSet<>();
+    for (int i = 0; i < sTableArray.length; i++) {
+      TableName table = sTableArray[i];
+      BackupManifest manifest = backupManifestMap.get(table);
+      // Get the image list of this backup for restore in time order from old
+      // to new.
+      List<BackupImage> list = new ArrayList<BackupImage>();
+      list.add(manifest.getBackupImage());
+      TreeSet<BackupImage> set = new TreeSet<BackupImage>(list);
+      List<BackupImage> depList = manifest.getDependentListByTable(table);
+      set.addAll(depList);
+      BackupImage[] arr = new BackupImage[set.size()];
+      set.toArray(arr);
+      restoreImages(arr, table, tTableArray[i], truncateIfExists);
+      restoreImageSet.addAll(list);
+      if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
+        LOG.info("Restore includes the following image(s):");
+        for (BackupImage image : restoreImageSet) {
+          LOG.info("Backup: "
+              + image.getBackupId()
+              + " "
+              + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
                   table));
+          if (image.getType() == BackupType.INCREMENTAL) {
+            backupIdSet.add(image.getBackupId());
+            LOG.debug("adding " + image.getBackupId() + " for bulk load");
+          }
+        }
+      }
+    }
+    try (BackupSystemTable table = new BackupSystemTable(conn)) {
+      List<TableName> sTableList = Arrays.asList(sTableArray);
+      for (String id : backupIdSet) {
+        LOG.debug("restoring bulk load for " + id);
+        Map<byte[], List<Path>>[] mapForSrc = table.readBulkLoadedFiles(id, sTableList);
+        Map<LoadQueueItem, ByteBuffer> loaderResult;
+        conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true);
+        LoadIncrementalHFiles loader = MapReduceRestoreJob.createLoader(conf);
+        for (int i = 0; i < sTableList.size(); i++) {
+          if (mapForSrc[i] != null && !mapForSrc[i].isEmpty()) {
+            loaderResult = loader.run(null, mapForSrc[i], tTableArray[i]);
+            LOG.debug("bulk loading " + sTableList.get(i) + " to " + tTableArray[i]);
+            if (loaderResult.isEmpty()) {
+              String msg = "Couldn't bulk load for " + sTableList.get(i) + " to " + tTableArray[i];
+              LOG.error(msg);
+              throw new IOException(msg);
+            }
           }
         }
       }
-    } catch (Exception e) {
-      LOG.error("Failed", e);
-      throw new IOException(e);
     }
     LOG.debug("restoreStage finished");
   }
 
+  static long getTsFromBackupId(String backupId) {
+    if (backupId == null) {
+      return 0;
+    }
+    return Long.parseLong(backupId.substring(backupId.lastIndexOf("_") + 1));
+  }
+
+  static boolean withinRange(long a, long lower, long upper) {
+    return a >= lower && a <= upper;
+  }
+
   public void execute() throws IOException {
 
     // case VALIDATION:
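
getTsFromBackupId() above assumes backup ids embed their start timestamp after the last
underscore, i.e. ids of the form backup_<timestamp>. For example (illustrative value):

    long ts = RestoreTablesClient.getTsFromBackupId("backup_1490741454255"); // 1490741454255L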

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index ffb61ec..9bafe12 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -98,7 +98,7 @@ public class MapReduceRestoreJob implements RestoreJob {
         result = player.run(playerArgs);
         if (succeeded(result)) {
           // do bulk load
-          LoadIncrementalHFiles loader = createLoader();
+          LoadIncrementalHFiles loader = createLoader(getConf());
           if (LOG.isDebugEnabled()) {
             LOG.debug("Restoring HFiles from directory " + bulkOutputPath);
           }
@@ -134,13 +134,13 @@ public class MapReduceRestoreJob implements RestoreJob {
     return result == 0;
   }
 
-  private LoadIncrementalHFiles createLoader() throws IOException {
+  public static LoadIncrementalHFiles createLoader(Configuration config) throws IOException {
     // set configuration for restore:
     // LoadIncrementalHFile needs more time
     // <name>hbase.rpc.timeout</name> <value>600000</value>
     // calculates
     Integer milliSecInHour = 3600000;
-    Configuration conf = new Configuration(getConf());
+    Configuration conf = new Configuration(config);
     conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, milliSecInHour);
 
     // By default, it is 32 and loader will fail if # of files in any region exceed this
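
Making createLoader() a public static factory lets RestoreTablesClient (above) build the same
restore-tuned loader without a MapReduceRestoreJob instance. A minimal usage sketch:

    // Loader with restore-friendly settings (longer hbase.rpc.timeout, relaxed
    // limit on hfiles per region) applied to a copy of the passed-in conf.
    LoadIncrementalHFiles loader = MapReduceRestoreJob.createLoader(conf);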

http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index f59e24c..80dfd66 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Deque;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -62,6 +63,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
@@ -144,7 +148,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     initialize();
   }
 
-  private void initialize() throws Exception {
+  private void initialize() throws IOException {
     if (initalized) {
       return;
     }
@@ -282,6 +286,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     public String toString() {
       return "family:"+ Bytes.toString(family) + " path:" + hfilePath.toString();
     }
+
+    public byte[] getFamily() {
+      return family;
+    }
+
+    public Path getFilePath() {
+      return hfilePath;
+    }
   }
 
   /*
@@ -1184,7 +1196,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
    * If the table is created for the first time, then "completebulkload" reads the files twice.
    * More modifications necessary if we want to avoid doing it.
    */
-  private void createTable(TableName tableName, String dirPath, Admin admin) throws Exception {
+  private void createTable(TableName tableName, String dirPath, Admin admin) throws IOException {
     final Path hfofDir = new Path(dirPath);
     final FileSystem fs = hfofDir.getFileSystem(getConf());
 
@@ -1238,7 +1250,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   }
 
   public Map<LoadQueueItem, ByteBuffer> run(String dirPath, Map<byte[], List<Path>> map,
-      TableName tableName) throws Exception{
+      TableName tableName) throws IOException {
     initialize();
     try (Connection connection = ConnectionFactory.createConnection(getConf());
         Admin admin = connection.getAdmin()) {
@@ -1261,7 +1273,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
       try (Table table = connection.getTable(tableName);
         RegionLocator locator = connection.getRegionLocator(tableName)) {
         boolean silence = "yes".equalsIgnoreCase(getConf().get(IGNORE_UNMATCHED_CF_CONF_KEY, ""));
-        boolean copyFiles = "yes".equalsIgnoreCase(getConf().get(ALWAYS_COPY_FILES, ""));
+        boolean copyFiles = getConf().getBoolean(ALWAYS_COPY_FILES, false);
         if (dirPath != null) {
           doBulkLoad(hfofDir, admin, table, locator, silence, copyFiles);
         } else {

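Since ALWAYS_COPY_FILES is now read with getBoolean() rather than compared to
the literal string "yes", standard boolean values work. A minimal sketch of
enabling it (only the key and constructor shown in this patch are used):

  Configuration conf = HBaseConfiguration.create();
  // copy HFiles into HBase instead of moving them, so the source files survive
  // the bulk load (useful when they must stay available, e.g. for backup)
  conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true);
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
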
http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index faae4ef..8e3e105 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -167,4 +167,20 @@ public class HFileArchiveUtil {
   private static Path getArchivePath(final Path rootdir) {
     return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY);
   }
+
+  /**
+   * Extracts the table name from an archived hfile path, assuming the layout
+   * <archive root>/data/<namespace>/<table>/<region>/<family>/<hfile>.
+   * @return table name given archive file path, or null if the path is too shallow
+   */
+  public static TableName getTableName(Path archivePath) {
+    Path p = archivePath;
+    String tbl = null;
+    // namespace is the 4th parent of file
+    for (int i = 0; i < 5; i++) {
+      if (p == null) return null;
+      if (i == 3) tbl = p.getName();
+      p = p.getParent();
+    }
+    if (p == null) return null;
+    return TableName.valueOf(p.getName(), tbl);
+  }
 }

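For reference, a sketch of the call with an illustrative path (the region,
family, and file names here are made up; only the directory layout matters):

  // <hbase.rootdir>/archive/data/<namespace>/<table>/<region>/<family>/<hfile>
  Path archived = new Path("/hbase/archive/data/default/t1/region1/f/hfile1");
  TableName tn = HFileArchiveUtil.getTableName(archived); // table the hfile belonged to
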
http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index ec88549..e6bd73e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -49,6 +49,10 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.mapreduce.HadoopSecurityEnabledUserProviderForTesting;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WALFactory;
@@ -88,6 +92,7 @@ public class TestBackupBase {
   protected static String BACKUP_ROOT_DIR = "/backupUT";
   protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
   protected static String provider = "defaultProvider";
+  protected static boolean secure = false;
 
   /**
    * @throws java.lang.Exception
@@ -96,6 +101,16 @@ public class TestBackupBase {
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL = new HBaseTestingUtility();
     conf1 = TEST_UTIL.getConfiguration();
+    if (secure) {
+      // set the always on security provider
+      UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
+          HadoopSecurityEnabledUserProviderForTesting.class);
+      // setup configuration
+      SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
+    }
+    String coproc = conf1.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
+    conf1.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? "" : coproc + ",") +
+        BackupObserver.class.getName());
     conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
     BackupManager.decorateMasterConfiguration(conf1);
     BackupManager.decorateRegionServerConfiguration(conf1);

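Outside of tests, the same wiring is needed on a real cluster before bulk loads
are tracked for incremental backup. A minimal sketch using only the keys and
helpers visible in this patch:

  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
  // append BackupObserver without clobbering coprocessors already configured
  String coproc = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      (coproc == null ? "" : coproc + ",") + BackupObserver.class.getName());
  BackupManager.decorateMasterConfiguration(conf);
  BackupManager.decorateRegionServerConfiguration(conf);
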
http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
new file mode 100644
index 0000000..dfbe106
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, SmallTests.class })
+public class TestBackupHFileCleaner {
+  private static final Log LOG = LogFactory.getLog(TestBackupHFileCleaner.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static Configuration conf = TEST_UTIL.getConfiguration();
+  private static TableName tableName = TableName.valueOf("backup.hfile.cleaner");
+  private static String famName = "fam";
+  static FileSystem fs = null;
+  Path root;
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniCluster(1);
+    fs = FileSystem.get(conf);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    if (fs != null) {
+      fs.close();
+    }
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    root = TEST_UTIL.getDataTestDirOnTestFS();
+  }
+
+  @After
+  public void cleanup() {
+    try {
+      fs.delete(root, true);
+    } catch (IOException e) {
+      LOG.warn("Failed to delete files recursively from path " + root);
+    }
+  }
+
+  @Test
+  public void testGetDeletableFiles() throws IOException {
+    // 1. Create a file
+    Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
+    fs.createNewFile(file);
+    // 2. Assert file is successfully created
+    assertTrue("Test file not created!", fs.exists(file));
+    BackupHFileCleaner cleaner = new BackupHFileCleaner();
+    cleaner.setConf(conf);
+    cleaner.setCheckForFullyBackedUpTables(false);
+    // 3. Assert that file as is should be deletable
+    List<FileStatus> stats = new ArrayList<>();
+    FileStatus stat = fs.getFileStatus(file);
+    stats.add(stat);
+    // the cleaner is called twice on purpose: files newer than its previous read
+    // of the backup system table are conservatively treated as non-deletable
+    Iterable<FileStatus> deletable = cleaner.getDeletableFiles(stats);
+    deletable = cleaner.getDeletableFiles(stats);
+    boolean found = false;
+    for (FileStatus stat1 : deletable) {
+      if (stat.equals(stat1)) found = true;
+    }
+    assertTrue("Cleaner should allow this file to be deleted as there is no hfile "
+        + "reference to it.", found);
+
+    // 4. Add the file as bulk load
+    List<Path> list = new ArrayList<>(1);
+    list.add(file);
+    try (Connection conn = ConnectionFactory.createConnection(conf);
+        BackupSystemTable sysTbl = new BackupSystemTable(conn)) {
+      List<TableName> sTableList = new ArrayList<>();
+      sTableList.add(tableName);
+      Map<byte[], List<Path>>[] maps = new Map[1];
+      maps[0] = new HashMap<>();
+      maps[0].put(famName.getBytes(), list);
+      sysTbl.writeBulkLoadedFiles(sTableList, maps, "1");
+    }
+
+    // 5. Assert file should not be deletable
+    // call twice again so the decision reflects the bulk-load record rather than
+    // the recency of the file relative to the last backup-table read
+    deletable = cleaner.getDeletableFiles(stats);
+    deletable = cleaner.getDeletableFiles(stats);
+    found = false;
+    for (FileStatus stat1 : deletable) {
+      if (stat.equals(stat1)) found = true;
+    }
+    assertFalse("Cleaner should not allow this file to be deleted as there is a hfile "
+        + "reference to it.", found);
+  }
+}
\ No newline at end of file

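BackupHFileCleaner is a plugin for the master's HFile cleaner chain.
BackupManager.decorateMasterConfiguration() is expected to register it, but a
hand-wired sketch would look roughly like this (the plugin key is the
long-standing HBase one, assumed here rather than taken from this patch):

  Configuration conf = HBaseConfiguration.create();
  String plugins = conf.get("hbase.master.hfilecleaner.plugins");
  conf.set("hbase.master.hfilecleaner.plugins",
      (plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
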
http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
new file mode 100644
index 0000000..c10ec40
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFiles;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import com.google.common.collect.Lists;
+
+/**
+ * 1. Create table t1
+ * 2. Load data to t1
+ * 3. Full backup t1
+ * 4. Load data to t1
+ * 5. Bulk load into t1
+ * 6. Incremental backup t1
+ */
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupWithBulkLoad.class);
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    secure = true;
+    List<Object[]> params = new ArrayList<Object[]>();
+    params.add(new Object[] {Boolean.TRUE});
+    return params;
+  }
+
+  public TestIncrementalBackupWithBulkLoad(Boolean b) {
+  }
+  // implement all test cases in 1 test since incremental backup/restore has dependencies
+  @Test
+  public void testIncBackupWithBulkLoad() throws Exception {
+    String testName = "TestIncBackupWithBulkLoad";
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tables = Lists.newArrayList(table1);
+    Connection conn = ConnectionFactory.createConnection(conf1);
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    BackupAdminImpl client = new BackupAdminImpl(conn);
+
+    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    String backupIdFull = client.backupTables(request);
+
+    assertTrue(checkSucceeded(backupIdFull));
+
+    // #2 - insert some data to table table1
+    HTable t1 = (HTable) conn.getTable(table1);
+    Put p1;
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      p1 = new Put(Bytes.toBytes("row-t1" + i));
+      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t1.put(p1);
+    }
+
+    Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
+    t1.close();
+
+    int NB_ROWS2 = 20;
+    LOG.debug("bulk loading into " + testName);
+    int actual = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
+        qualName, false, null, new byte[][][] {
+      new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
+      new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
+    }, true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2);
+
+    // #3 - incremental backup for table1
+    tables = Lists.newArrayList(table1);
+    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = client.backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+    // #4 - check tables for full restore
+    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+
+    // #5 - restore incremental backup for table1
+    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
+    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
+    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple,
+      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+
+    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertEquals(NB_ROWS_IN_BATCH * 2 + actual, TEST_UTIL.countRows(hTable));
+    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+
+    backupIdFull = client.backupTables(request);
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair
+      = table.readBulkloadRows(tables);
+      assertTrue("map still has " + pair.getSecond().size() + " entries",
+          pair.getSecond().isEmpty());
+    }
+    assertTrue(checkSucceeded(backupIdFull));
+
+    hTable.close();
+    admin.close();
+    conn.close();
+  }
+
+}

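The readBulkloadRows() call above doubles as the public way to inspect pending
bulk-load records. A condensed sketch (signature exactly as used in the test):

  try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupSystemTable sysTable = new BackupSystemTable(conn)) {
    Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>,
        List<byte[]>> pending = sysTable.readBulkloadRows(Lists.newArrayList(table1));
    // row keys of bulk loads not yet covered by a backup; a subsequent full
    // backup is expected to clear them
    int outstanding = pending.getSecond().size();
  }
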
http://git-wip-us.apache.org/repos/asf/hbase/blob/0345fc87/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
index a6dacf7..7ae5afc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
@@ -308,13 +308,14 @@ public class TestLoadIncrementalHFiles {
     runTest(testName, htd, bloomType, preCreateTable, tableSplitKeys, hfileRanges, useMap, false);
   }
 
-  private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
-      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
-      boolean copyFiles) throws Exception {
+  public static int loadHFiles(String testName, HTableDescriptor htd, HBaseTestingUtility util,
+      byte[] fam, byte[] qual, boolean preCreateTable, byte[][] tableSplitKeys,
+      byte[][][] hfileRanges, boolean useMap, boolean deleteFile,
+      boolean copyFiles, int initRowCount, int factor) throws Exception {
     Path dir = util.getDataTestDirOnTestFS(testName);
     FileSystem fs = util.getTestFileSystem();
     dir = dir.makeQualified(fs);
-    Path familyDir = new Path(dir, Bytes.toString(FAMILY));
+    Path familyDir = new Path(dir, Bytes.toString(fam));
 
     int hfileIdx = 0;
     Map<byte[], List<Path>> map = null;
@@ -324,26 +325,26 @@ public class TestLoadIncrementalHFiles {
     }
     if (useMap) {
       map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      map.put(FAMILY, list);
+      map.put(fam, list);
     }
     Path last = null;
     for (byte[][] range : hfileRanges) {
       byte[] from = range[0];
       byte[] to = range[1];
       Path path = new Path(familyDir, "hfile_" + hfileIdx++);
-      HFileTestUtil.createHFile(util.getConfiguration(), fs, path, FAMILY, QUALIFIER, from, to, 1000);
+      HFileTestUtil.createHFile(util.getConfiguration(), fs, path, fam, qual, from, to, factor);
       if (useMap) {
         last = path;
         list.add(path);
       }
     }
-    int expectedRows = hfileIdx * 1000;
+    int expectedRows = hfileIdx * factor;
 
-    if (preCreateTable || map != null) {
+    final TableName tableName = htd.getTableName();
+    if (!util.getHBaseAdmin().tableExists(tableName) && (preCreateTable || map != null)) {
       util.getAdmin().createTable(htd, tableSplitKeys);
     }
 
-    final TableName tableName = htd.getTableName();
     Configuration conf = util.getConfiguration();
     if (copyFiles) {
       conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true);
@@ -351,12 +352,14 @@ public class TestLoadIncrementalHFiles {
     LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
     String [] args= {dir.toString(), tableName.toString()};
     if (useMap) {
-      fs.delete(last);
+      if (deleteFile) fs.delete(last);
       Map<LoadQueueItem, ByteBuffer> loaded = loader.run(null, map, tableName);
-      expectedRows -= 1000;
-      for (LoadQueueItem item : loaded.keySet()) {
-        if (item.hfilePath.getName().equals(last.getName())) {
-          fail(last + " should be missing");
+      if (deleteFile) {
+        expectedRows -= factor;
+        for (LoadQueueItem item : loaded.keySet()) {
+          if (item.hfilePath.getName().equals(last.getName())) {
+            fail(last + " should be missing");
+          }
         }
       }
     } else {
@@ -365,19 +368,30 @@ public class TestLoadIncrementalHFiles {
 
     if (copyFiles) {
       for (Path p : list) {
-        assertTrue(fs.exists(p));
+        assertTrue(p + " should exist", fs.exists(p));
       }
     }
 
     Table table = util.getConnection().getTable(tableName);
     try {
-      assertEquals(expectedRows, util.countRows(table));
+      assertEquals(initRowCount + expectedRows, util.countRows(table));
     } finally {
       table.close();
     }
 
+    return expectedRows;
+  }
+
+  private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
+      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
+      boolean copyFiles) throws Exception {
+    loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys,
+        hfileRanges, useMap, true, copyFiles, 0, 1000);
+
+    final TableName tableName = htd.getTableName();
     // verify staging folder has been cleaned up
     Path stagingBasePath = new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
+    FileSystem fs = util.getTestFileSystem();
     if(fs.exists(stagingBasePath)) {
       FileStatus[] files = fs.listStatus(stagingBasePath);
       for(FileStatus file : files) {

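The extracted loadHFiles() helper is now callable from other tests, as
TestIncrementalBackupWithBulkLoad does above. A sketch of a direct call (htd and
util are the test class's existing fields; family, qualifier, and range bytes
are illustrative):

  // creates one HFile with 100 rows spanning ["aaa", "mmm") and bulk loads it,
  // returning the number of rows added
  int added = TestLoadIncrementalHFiles.loadHFiles("bulkLoadSketch", htd, util,
      Bytes.toBytes("f"), Bytes.toBytes("q"), true /* preCreateTable */,
      null /* tableSplitKeys */,
      new byte[][][] { new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("mmm") } },
      false /* useMap */, false /* deleteFile */, false /* copyFiles */,
      0 /* initRowCount */, 100 /* factor */);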

[6/9] hbase git commit: HBASE-17855 Fix typo in async client implementation

Posted by sy...@apache.org.
HBASE-17855 Fix typo in async client implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ec14594
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ec14594
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ec14594

Branch: refs/heads/hbase-12439
Commit: 0ec1459467f280280630e5b597bef302ec43cedd
Parents: d033cbb
Author: zhangduo <zh...@apache.org>
Authored: Fri Mar 31 09:37:20 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Fri Mar 31 13:00:27 2017 +0800

----------------------------------------------------------------------
 .../hbase/client/AsyncMetaRegionLocator.java    | 35 ++++++++++----------
 .../hbase/client/AsyncNonMetaRegionLocator.java |  3 +-
 .../hadoop/hbase/client/AsyncRegionLocator.java |  2 +-
 .../client/AbstractTestAsyncTableScan.java      |  8 ++---
 4 files changed, 24 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ec14594/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
index 5b7a68f..6e7dba7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
@@ -94,25 +94,26 @@ class AsyncMetaRegionLocator {
   }
 
   void updateCachedLocation(HRegionLocation loc, Throwable exception) {
-    updateCachedLoation(loc, exception, l -> metaRegionLocation.get(), newLoc -> {
-      for (;;) {
-        HRegionLocation oldLoc = metaRegionLocation.get();
-        if (oldLoc != null && (oldLoc.getSeqNum() > newLoc.getSeqNum()
-            || oldLoc.getServerName().equals(newLoc.getServerName()))) {
-          return;
-        }
-        if (metaRegionLocation.compareAndSet(oldLoc, newLoc)) {
-          return;
+    AsyncRegionLocator.updateCachedLocation(loc, exception, l -> metaRegionLocation.get(),
+      newLoc -> {
+        for (;;) {
+          HRegionLocation oldLoc = metaRegionLocation.get();
+          if (oldLoc != null && (oldLoc.getSeqNum() > newLoc.getSeqNum() ||
+              oldLoc.getServerName().equals(newLoc.getServerName()))) {
+            return;
+          }
+          if (metaRegionLocation.compareAndSet(oldLoc, newLoc)) {
+            return;
+          }
         }
-      }
-    }, l -> {
-      for (;;) {
-        HRegionLocation oldLoc = metaRegionLocation.get();
-        if (!canUpdate(l, oldLoc) || metaRegionLocation.compareAndSet(oldLoc, null)) {
-          return;
+      }, l -> {
+        for (;;) {
+          HRegionLocation oldLoc = metaRegionLocation.get();
+          if (!canUpdate(l, oldLoc) || metaRegionLocation.compareAndSet(oldLoc, null)) {
+            return;
+          }
         }
-      }
-    });
+      });
   }
 
   void clearCache() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ec14594/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index dcf2c91..3dc9537 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -22,7 +22,6 @@ import static org.apache.hadoop.hbase.HConstants.NINES;
 import static org.apache.hadoop.hbase.HConstants.ZEROES;
 import static org.apache.hadoop.hbase.HRegionInfo.createRegionName;
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-import static org.apache.hadoop.hbase.client.AsyncRegionLocator.updateCachedLoation;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
@@ -442,7 +441,7 @@ class AsyncNonMetaRegionLocator {
   }
 
   void updateCachedLocation(HRegionLocation loc, Throwable exception) {
-    updateCachedLoation(loc, exception, l -> {
+    AsyncRegionLocator.updateCachedLocation(loc, exception, l -> {
       TableCache tableCache = cache.get(l.getRegionInfo().getTable());
       if (tableCache == null) {
         return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ec14594/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 7030eac..1c65472 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -98,7 +98,7 @@ class AsyncRegionLocator {
         && oldLoc.getServerName().equals(loc.getServerName());
   }
 
-  static void updateCachedLoation(HRegionLocation loc, Throwable exception,
+  static void updateCachedLocation(HRegionLocation loc, Throwable exception,
       Function<HRegionLocation, HRegionLocation> cachedLocationSupplier,
       Consumer<HRegionLocation> addToCache, Consumer<HRegionLocation> removeFromCache) {
     HRegionLocation oldLoc = cachedLocationSupplier.apply(loc);

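Beyond the rename, note the helper's shape: it is parameterized by a lookup
function and add/remove consumers, which is what lets the meta and non-meta
locators above share one cache-update policy. A hedged sketch of a caller (the
cache accessors are assumptions for illustration):

  AsyncRegionLocator.updateCachedLocation(loc, exception,
      l -> cache.get(l.getRegionInfo()),                    // current cached location
      newLoc -> cache.put(newLoc.getRegionInfo(), newLoc),  // replace with newer one
      oldLoc -> cache.remove(oldLoc.getRegionInfo()));      // drop the stale entry
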
http://git-wip-us.apache.org/repos/asf/hbase/blob/0ec14594/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
index 661ffe2..d4409ca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
@@ -237,11 +237,11 @@ public abstract class AbstractTestAsyncTableScan {
 
   @Test
   public void testScanWithLimit() throws Exception {
- //   testScan(1, true, 998, false, 900); // from first region to last region
+    testScan(1, true, 998, false, 900); // from first region to last region
     testScan(123, true, 234, true, 100);
-  //  testScan(234, true, 456, false, 100);
- //   testScan(345, false, 567, true, 100);
- //   testScan(456, false, 678, false, 100);
+    testScan(234, true, 456, false, 100);
+    testScan(345, false, 567, true, 100);
+    testScan(456, false, 678, false, 100);
   }
 
   @Test


[7/9] hbase git commit: HBASE-17821: The CompoundConfiguration#toString is wrong

Posted by sy...@apache.org.
HBASE-17821: The CompoundConfiguration#toString is wrong

Signed-off-by: CHIA-PING TSAI <ch...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9682ca5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9682ca5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9682ca5

Branch: refs/heads/hbase-12439
Commit: a9682ca5dc46a74edca3da630560fbaf5d0cf38b
Parents: 0ec1459
Author: Yi Liang <ea...@gmail.com>
Authored: Thu Mar 23 13:29:23 2017 -0700
Committer: CHIA-PING TSAI <ch...@gmail.com>
Committed: Fri Mar 31 15:05:44 2017 +0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hbase/CompoundConfiguration.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9682ca5/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
index a7fcba6..5812cca 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
@@ -264,7 +264,7 @@ public class CompoundConfiguration extends Configuration {
     StringBuffer sb = new StringBuffer();
     sb.append("CompoundConfiguration: " + this.configs.size() + " configs");
     for (ImmutableConfigMap m : this.configs) {
-      sb.append(this.configs);
+      sb.append(m);
     }
     return sb.toString();
   }

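Before the fix, every iteration appended the whole config list instead of the
current member, so toString() repeated the list once per config. A small sketch
of the corrected behavior (conf1 and conf2 are arbitrary Configurations):

  CompoundConfiguration cc = new CompoundConfiguration().add(conf1).add(conf2);
  // now prints "CompoundConfiguration: 2 configs" followed by each member once
  System.out.println(cc);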

[2/9] hbase git commit: HBASE-17287 Master becomes a zombie if filesystem object closes

Posted by sy...@apache.org.
HBASE-17287 Master becomes a zombie if filesystem object closes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f159557e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f159557e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f159557e

Branch: refs/heads/hbase-12439
Commit: f159557eded160680e623b966350ea3442b5f35a
Parents: 0345fc8
Author: tedyu <yu...@gmail.com>
Authored: Tue Mar 28 20:41:46 2017 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue Mar 28 20:41:46 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/master/MasterWalManager.java   |   5 +
 .../procedure/TestSafemodeBringsDownMaster.java | 125 +++++++++++++++++++
 2 files changed, 130 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f159557e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index 27aca94..e67af14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -270,6 +270,11 @@ public class MasterWalManager {
         }
         logDirs.add(splitDir);
       }
+    } catch (IOException ioe) {
+      if (!checkFileSystem()) {
+        this.services.abort("Aborting due to filesystem unavailable", ioe);
+        throw ioe;
+      }
     } finally {
       if (needReleaseLock) {
         this.splitLogLock.unlock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f159557e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java
new file mode 100644
index 0000000..c255843
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestSafemodeBringsDownMaster {
+  private static final Log LOG = LogFactory.getLog(TestSafemodeBringsDownMaster.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static void setupConf(Configuration conf) {
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+    conf.set(BaseLoadBalancer.TABLES_ON_MASTER, "none");
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    resetProcExecutorTestingKillFlag();
+  }
+
+  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+  }
+  private void resetProcExecutorTestingKillFlag() {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    assertTrue("expected executor to be running", procExec.isRunning());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test(timeout=60000)
+  public void testSafemodeBringsDownMaster() throws Exception {
+    final TableName tableName = TableName.valueOf("testSafemodeBringsDownMaster");
+    final byte[][] splitKeys = new byte[][] {
+      Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+    };
+    HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+        getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+    MiniDFSCluster dfsCluster = UTIL.getDFSCluster();
+    DistributedFileSystem dfs = (DistributedFileSystem) dfsCluster.getFileSystem();
+    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    final long timeOut = 180000;
+    long startTime = System.currentTimeMillis();
+    int index = -1;
+    do {
+      index = UTIL.getMiniHBaseCluster().getServerWithMeta();
+    } while (index == -1 &&
+      System.currentTimeMillis() < startTime + timeOut);
+
+    if (index != -1){
+      UTIL.getMiniHBaseCluster().abortRegionServer(index);
+      UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
+    }
+    UTIL.waitFor(timeOut, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        List<JVMClusterUtil.MasterThread> threads = UTIL.getMiniHBaseCluster().getLiveMasterThreads();
+        return threads == null || threads.isEmpty();
+      }
+    });
+    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+  }
+}


[9/9] hbase git commit: Add diversity statement to CoC

Posted by sy...@apache.org.
Add diversity statement to CoC

Signed-off-by: Misty Stanley-Jones <mi...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c4d9c89
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c4d9c89
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c4d9c89

Branch: refs/heads/hbase-12439
Commit: 1c4d9c8965952cbd17f0afdacbb0c0ac1e5bd1d7
Parents: 7700a7f
Author: Misty Stanley-Jones <mi...@apache.org>
Authored: Fri Mar 31 12:49:02 2017 -0700
Committer: Misty Stanley-Jones <mi...@apache.org>
Committed: Fri Mar 31 12:49:37 2017 -0700

----------------------------------------------------------------------
 src/main/site/xdoc/coc.xml | 46 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c4d9c89/src/main/site/xdoc/coc.xml
----------------------------------------------------------------------
diff --git a/src/main/site/xdoc/coc.xml b/src/main/site/xdoc/coc.xml
index e091507..fc2b549 100644
--- a/src/main/site/xdoc/coc.xml
+++ b/src/main/site/xdoc/coc.xml
@@ -32,13 +32,25 @@ under the License.
   <body>
   <section name="Code of Conduct Policy">
 <p>
-We expect participants in discussions on the HBase project mailing lists, IRC channels, and JIRA issues to abide by the Apache Software Foundation's <a href="http://apache.org/foundation/policies/conduct.html">Code of Conduct</a>.
+We expect participants in discussions on the HBase project mailing lists, IRC
+channels, and JIRA issues to abide by the Apache Software Foundation's
+<a href="http://apache.org/foundation/policies/conduct.html">Code of Conduct</a>.
 </p>
 <p>
-If you feel there had been a violation of this code, please point out your concerns publicly in a friendly and matter of fact manner. Nonverbal communication is prone to misinterpretation and misunderstanding. Everyone has bad days and sometimes says things they regret later. Someone else's communication style may clash with yours, but the difference can be amicably resolved. After pointing out your concerns please be generous upon receiving an apology.
+If you feel there has been a violation of this code, please point out your
+concerns publicly in a friendly and matter of fact manner. Nonverbal
+communication is prone to misinterpretation and misunderstanding. Everyone has
+bad days and sometimes says things they regret later. Someone else's
+communication style may clash with yours, but the difference can be amicably
+resolved. After pointing out your concerns please be generous upon receiving an
+apology.
 </p>
 <p>
-Should there be repeated instances of code of conduct violations, or if there is an obvious and severe violation, the HBase PMC may become involved. When this happens the PMC will openly discuss the matter, most likely on the dev@hbase mailing list, and will consider taking the following actions, in order, if there is a continuing problem with an individual:
+Should there be repeated instances of code of conduct violations, or if there is
+an obvious and severe violation, the HBase PMC may become involved. When this
+happens the PMC will openly discuss the matter, most likely on the dev@hbase
+mailing list, and will consider taking the following actions, in order, if there
+is a continuing problem with an individual:
 <ol>
 <li>A friendly off-list warning;</li>
 <li>A friendly public warning, if the communication at issue was on list, otherwise another off-list warning;</li>
@@ -47,7 +59,33 @@ Should there be repeated instances of code of conduct violations, or if there is
 </ol>
 </p>
 <p>
-For flagrant violations requiring a firm response the PMC may opt to skip early steps. No action will be taken before public discussion leading to consensus or a successful majority vote. 
+For flagrant violations requiring a firm response the PMC may opt to skip early
+steps. No action will be taken before public discussion leading to consensus or
+a successful majority vote.
+</p>
+  </section>
+  <section name="Diversity Statement">
+<p>
+As a project and a community, we encourage you to participate in the HBase project
+in whatever capacity suits you, whether it involves development, documentation,
+answering questions on mailing lists, triaging issue and patch review, managing
+releases, or any other way that you want to help. We appreciate your
+contributions and the time you dedicate to the HBase project. We strive to
+recognize the work of participants publicly. Please let us know if we can
+improve in this area.
+</p>
+<p>
+We value diversity and strive to support participation by people with all
+different backgrounds. Rich projects grow from groups with different points of
+view and different backgrounds. We welcome your suggestions about how we can
+welcome participation by people at all skill levels and with all aspects of the
+project.
+</p>
+<p>
+If you can think of something we are doing that we shouldn't, or something that
+we should do but aren't, please let us know. If you feel comfortable doing so,
+use the public mailing lists. Otherwise, reach out to a PMC member or send an
+email to <a href="mailto:private@hbase.apache.org">the private PMC mailing list</a>.
 </p>
   </section>
   </body>


[4/9] hbase git commit: HBASE-17520 Implement isTableEnabled/Disabled/Available methods

Posted by sy...@apache.org.
HBASE-17520 Implement isTableEnabled/Disabled/Available methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/752b258b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/752b258b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/752b258b

Branch: refs/heads/hbase-12439
Commit: 752b258b7c30aa375b5bb7a33abf435f37e8c877
Parents: b290d14
Author: Guanghao Zhang <zg...@apache.org>
Authored: Thu Mar 30 13:37:17 2017 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Thu Mar 30 13:37:17 2017 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/AsyncMetaTableAccessor.java    | 432 ++++++++++++++++++-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  31 ++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java    | 109 ++++-
 .../hbase/client/TestAsyncTableAdminApi.java    |  34 +-
 4 files changed, 603 insertions(+), 3 deletions(-)
----------------------------------------------------------------------

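A hedged sketch of how the new checks read from client code (the method names
come from this commit's summary; obtaining the AsyncConnection is assumed):

  AsyncAdmin admin = asyncConnection.getAdmin();
  TableName tn = TableName.valueOf("t1");
  admin.isTableEnabled(tn).thenAccept(enabled ->
      System.out.println("t1 enabled: " + enabled));
  admin.isTableAvailable(tn).thenAccept(available ->
      System.out.println("t1 available: " + available));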

http://git-wip-us.apache.org/repos/asf/hbase/blob/752b258b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
index d09d29e..6988047 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
@@ -20,20 +20,33 @@ package org.apache.hadoop.hbase;
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
 import java.util.Optional;
+import java.util.SortedMap;
 import java.util.concurrent.CompletableFuture;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor;
+import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
+import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.RawAsyncTable;
+import org.apache.hadoop.hbase.client.RawScanResultConsumer;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 
@@ -46,6 +59,14 @@ public class AsyncMetaTableAccessor {
 
   private static final Log LOG = LogFactory.getLog(AsyncMetaTableAccessor.class);
 
+
+  /** The delimiter for meta columns for replicaIds &gt; 0 */
+  private static final char META_REPLICA_ID_DELIMITER = '_';
+
+  /** A regex for parsing server columns from meta. See above javadoc for meta layout */
+  private static final Pattern SERVER_COLUMN_PATTERN = Pattern
+      .compile("^server(_[0-9a-fA-F]{4})?$");
+
   public static CompletableFuture<Boolean> tableExists(RawAsyncTable metaTable, TableName tableName) {
     if (tableName.equals(META_TABLE_NAME)) {
       return CompletableFuture.completedFuture(true);
@@ -122,6 +143,350 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
+   * Used to get table regions' info and server.
+   * @param metaTable
+   * @param tableName table we're looking for; if empty, regions of all tables are returned
+   * @return the list of HRegionInfo and ServerName pairs. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  public static CompletableFuture<List<Pair<HRegionInfo, ServerName>>> getTableRegionsAndLocations(
+      RawAsyncTable metaTable, final Optional<TableName> tableName) {
+    return getTableRegionsAndLocations(metaTable, tableName, true);
+  }
+
+  /**
+   * Used to get table regions' info and server.
+   * @param metaTable
+   * @param tableName table we're looking for; if empty, regions of all tables are returned
+   * @param excludeOfflinedSplitParents don't return split parents
+   * @return the list of HRegionInfo and ServerName pairs. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  public static CompletableFuture<List<Pair<HRegionInfo, ServerName>>> getTableRegionsAndLocations(
+      RawAsyncTable metaTable, final Optional<TableName> tableName,
+      final boolean excludeOfflinedSplitParents) {
+    CompletableFuture<List<Pair<HRegionInfo, ServerName>>> future = new CompletableFuture<>();
+    if (tableName.filter((t) -> t.equals(TableName.META_TABLE_NAME)).isPresent()) {
+      future.completeExceptionally(new IOException(
+          "This method can't be used to locate meta regions; use MetaTableLocator instead"));
+      return future;
+    }
+
+    // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
+    CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor = new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
+      private Optional<RegionLocations> current = null;
+
+      @Override
+      public boolean visit(Result r) throws IOException {
+        current = getRegionLocations(r);
+        if (!current.isPresent() || current.get().getRegionLocation().getRegionInfo() == null) {
+          LOG.warn("No serialized HRegionInfo in " + r);
+          return true;
+        }
+        HRegionInfo hri = current.get().getRegionLocation().getRegionInfo();
+        if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
+        // Else call super and add this Result to the collection.
+        return super.visit(r);
+      }
+
+      @Override
+      void add(Result r) {
+        if (!current.isPresent()) {
+          return;
+        }
+        for (HRegionLocation loc : current.get().getRegionLocations()) {
+          if (loc != null) {
+            this.results.add(new Pair<HRegionInfo, ServerName>(loc.getRegionInfo(), loc
+                .getServerName()));
+          }
+        }
+      }
+    };
+
+    scanMeta(metaTable, tableName, QueryType.REGION, visitor).whenComplete((v, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      future.complete(visitor.getResults());
+    });
+    return future;
+  }
+
+  /**
+   * Performs a scan of META table for given table.
+   * @param metaTable
+   * @param tableName table within which we scan
+   * @param type scanned part of meta
+   * @param visitor Visitor invoked against each row
+   */
+  private static CompletableFuture<Void> scanMeta(RawAsyncTable metaTable,
+      Optional<TableName> tableName, QueryType type, final Visitor visitor) {
+    return scanMeta(metaTable, getTableStartRowForMeta(tableName, type),
+      getTableStopRowForMeta(tableName, type), type, Integer.MAX_VALUE, visitor);
+  }
+
+  /**
+   * Performs a scan of META table for given table.
+   * @param metaTable
+   * @param startRow Where to start the scan
+   * @param stopRow Where to stop the scan
+   * @param type scanned part of meta
+   * @param maxRows maximum rows to return
+   * @param visitor Visitor invoked against each row
+   */
+  private static CompletableFuture<Void> scanMeta(RawAsyncTable metaTable, Optional<byte[]> startRow,
+      Optional<byte[]> stopRow, QueryType type, int maxRows, final Visitor visitor) {
+    int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
+    Scan scan = getMetaScan(metaTable, rowUpperLimit);
+    for (byte[] family : type.getFamilies()) {
+      scan.addFamily(family);
+    }
+    startRow.ifPresent(scan::withStartRow);
+    stopRow.ifPresent(scan::withStopRow);
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Scanning META starting at row=" + Bytes.toStringBinary(scan.getStartRow())
+          + " stopping at row=" + Bytes.toStringBinary(scan.getStopRow()) + " for max="
+          + rowUpperLimit + " with caching=" + scan.getCaching());
+    }
+
+    CompletableFuture<Void> future = new CompletableFuture<Void>();
+    metaTable.scan(scan, new MetaTableRawScanResultConsumer(rowUpperLimit, visitor, future));
+    return future;
+  }
+
+  private static final class MetaTableRawScanResultConsumer implements RawScanResultConsumer {
+
+    private int currentRowCount;
+
+    private final int rowUpperLimit;
+
+    private final Visitor visitor;
+
+    private final CompletableFuture<Void> future;
+
+    MetaTableRawScanResultConsumer(int rowUpperLimit, Visitor visitor, CompletableFuture<Void> future) {
+      this.rowUpperLimit = rowUpperLimit;
+      this.visitor = visitor;
+      this.future = future;
+      this.currentRowCount = 0;
+    }
+
+    @Override
+    public void onError(Throwable error) {
+      future.completeExceptionally(error);
+    }
+
+    @Override
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
+      justification = "https://github.com/findbugsproject/findbugs/issues/79")
+    public void onComplete() {
+      future.complete(null);
+    }
+
+    @Override
+    public void onNext(Result[] results, ScanController controller) {
+      for (Result result : results) {
+        try {
+          if (!visitor.visit(result)) {
+            controller.terminate();
+          }
+        } catch (IOException e) {
+          future.completeExceptionally(e);
+          controller.terminate();
+        }
+        if (++currentRowCount >= rowUpperLimit) {
+          controller.terminate();
+        }
+      }
+    }
+  }
+
+  private static Scan getMetaScan(RawAsyncTable metaTable, int rowUpperLimit) {
+    Scan scan = new Scan();
+    int scannerCaching = metaTable.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING,
+      HConstants.DEFAULT_HBASE_META_SCANNER_CACHING);
+    if (metaTable.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS,
+      HConstants.DEFAULT_USE_META_REPLICAS)) {
+      scan.setConsistency(Consistency.TIMELINE);
+    }
+    if (rowUpperLimit <= scannerCaching) {
+      scan.setLimit(rowUpperLimit);
+    }
+    int rows = Math.min(rowUpperLimit, scannerCaching);
+    scan.setCaching(rows);
+    return scan;
+  }
+
+  /**
+   * Returns an HRegionLocationList extracted from the result.
+   * @return an HRegionLocationList containing all locations for the region range or null if we
+   *         can't deserialize the result.
+   */
+  private static Optional<RegionLocations> getRegionLocations(final Result r) {
+    if (r == null) return Optional.empty();
+    Optional<HRegionInfo> regionInfo = getHRegionInfo(r, getRegionInfoColumn());
+    if (!regionInfo.isPresent()) return Optional.empty();
+
+    List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
+    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyMap = r.getNoVersionMap();
+
+    locations.add(getRegionLocation(r, regionInfo.get(), 0));
+
+    NavigableMap<byte[], byte[]> infoMap = familyMap.get(getCatalogFamily());
+    if (infoMap == null) return Optional.of(new RegionLocations(locations));
+
+    // iterate until all serverName columns are seen
+    int replicaId = 0;
+    byte[] serverColumn = getServerColumn(replicaId);
+    SortedMap<byte[], byte[]> serverMap = infoMap.tailMap(serverColumn, false);
+
+    if (serverMap.isEmpty()) return Optional.of(new RegionLocations(locations));
+
+    for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
+      replicaId = parseReplicaIdFromServerColumn(entry.getKey());
+      if (replicaId < 0) {
+        break;
+      }
+      HRegionLocation location = getRegionLocation(r, regionInfo.get(), replicaId);
+      // In case the region replica is newly created, its location might be null. We usually do
+      // not keep HRLs with a null ServerName in a RegionLocations object; they are handled as
+      // null HRLs.
+      if (location == null || location.getServerName() == null) {
+        locations.add(null);
+      } else {
+        locations.add(location);
+      }
+    }
+
+    return Optional.of(new RegionLocations(locations));
+  }
+
+  /**
+   * Returns the HRegionLocation parsed from the given meta row Result
+   * for the given regionInfo and replicaId. The regionInfo can be the default region info
+   * for the replica.
+   * @param r the meta row result
+   * @param regionInfo RegionInfo for default replica
+   * @param replicaId the replicaId for the HRegionLocation
+   * @return HRegionLocation parsed from the given meta row Result for the given replicaId
+   */
+  private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
+      final int replicaId) {
+    Optional<ServerName> serverName = getServerName(r, replicaId);
+    long seqNum = getSeqNumDuringOpen(r, replicaId);
+    HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
+    return new HRegionLocation(replicaInfo, serverName.orElse(null), seqNum);
+  }
+
+  /**
+   * Returns a {@link ServerName} from a catalog table {@link Result}.
+   * @param r Result to pull from
+   * @param replicaId the replicaId of the region
+   * @return an Optional containing the ServerName, or an empty Optional if it cannot be parsed.
+   */
+  private static Optional<ServerName> getServerName(final Result r, final int replicaId) {
+    byte[] serverColumn = getServerColumn(replicaId);
+    Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn);
+    if (cell == null || cell.getValueLength() == 0) return Optional.empty();
+    String hostAndPort = Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
+      cell.getValueLength());
+    byte[] startcodeColumn = getStartCodeColumn(replicaId);
+    cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn);
+    if (cell == null || cell.getValueLength() == 0) return Optional.empty();
+    try {
+      return Optional.of(ServerName.valueOf(hostAndPort,
+        Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
+    } catch (IllegalArgumentException e) {
+      LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * The latest seqnum that the server writing to meta observed when opening the region.
+   * I.e. the seqNum with which the result of {@link #getServerName(Result, int)} was written.
+   * @param r Result to pull the seqNum from
+   * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
+   */
+  private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
+    Cell cell = r.getColumnLatestCell(getCatalogFamily(), getSeqNumColumn(replicaId));
+    if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
+    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+  }
+
+  /**
+   * @param tableName table we're working with
+   * @param type the query type, which determines how the start row is built
+   * @return start row for scanning META according to the query type
+   */
+  private static Optional<byte[]> getTableStartRowForMeta(Optional<TableName> tableName,
+      QueryType type) {
+    return tableName.map((table) -> {
+      switch (type) {
+      case REGION:
+        byte[] startRow = new byte[table.getName().length + 2];
+        System.arraycopy(table.getName(), 0, startRow, 0, table.getName().length);
+        startRow[startRow.length - 2] = HConstants.DELIMITER;
+        startRow[startRow.length - 1] = HConstants.DELIMITER;
+        return startRow;
+      case ALL:
+      case TABLE:
+      default:
+        return table.getName();
+      }
+    });
+  }
+
+  /**
+   * @param tableName table we're working with
+   * @param type the query type, which determines how the stop row is built
+   * @return stop row for scanning META according to the query type
+   */
+  private static Optional<byte[]> getTableStopRowForMeta(Optional<TableName> tableName,
+      QueryType type) {
+    return tableName.map((table) -> {
+      final byte[] stopRow;
+      switch (type) {
+      case REGION:
+        stopRow = new byte[table.getName().length + 3];
+        System.arraycopy(table.getName(), 0, stopRow, 0, table.getName().length);
+        stopRow[stopRow.length - 3] = ' ';
+        stopRow[stopRow.length - 2] = HConstants.DELIMITER;
+        stopRow[stopRow.length - 1] = HConstants.DELIMITER;
+        break;
+      case ALL:
+      case TABLE:
+      default:
+        stopRow = new byte[table.getName().length + 1];
+        System.arraycopy(table.getName(), 0, stopRow, 0, table.getName().length);
+        stopRow[stopRow.length - 1] = ' ';
+        break;
+      }
+      return stopRow;
+    });
+  }
+
+  /**
+   * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
+   * <code>qualifier</code> of the catalog table result.
+   * @param r a Result object from the catalog table scan
+   * @param qualifier the column qualifier to read
+   * @return an Optional containing the HRegionInfo, or an empty Optional if it cannot be parsed.
+   */
+  private static Optional<HRegionInfo> getHRegionInfo(final Result r, byte[] qualifier) {
+    Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier);
+    if (cell == null) return Optional.empty();
+    return Optional.ofNullable(HRegionInfo.parseFromOrNull(cell.getValueArray(),
+      cell.getValueOffset(), cell.getValueLength()));
+  }
+
+  /**
+   * Returns the column family used for meta columns.
+   * @return HConstants.CATALOG_FAMILY.
+   */
+  private static byte[] getCatalogFamily() {
+    return HConstants.CATALOG_FAMILY;
+  }
+
+  /**
    * Returns the column family used for table columns.
    * @return HConstants.TABLE_FAMILY.
    */
@@ -130,10 +495,75 @@ public class AsyncMetaTableAccessor {
   }
 
   /**
+   * Returns the column qualifier for serialized region info
+   * @return HConstants.REGIONINFO_QUALIFIER
+   */
+  private static byte[] getRegionInfoColumn() {
+    return HConstants.REGIONINFO_QUALIFIER;
+  }
+
+  /**
    * Returns the column qualifier for serialized table state
    * @return HConstants.TABLE_STATE_QUALIFIER
    */
   private static byte[] getStateColumn() {
     return HConstants.TABLE_STATE_QUALIFIER;
   }
+
+  /**
+   * Returns the column qualifier for server column for replicaId
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for server column qualifier
+   */
+  private static byte[] getServerColumn(int replicaId) {
+    return replicaId == 0
+      ? HConstants.SERVER_QUALIFIER
+      : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
+      + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * Returns the column qualifier for server start code column for replicaId
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for server start code column qualifier
+   */
+  private static byte[] getStartCodeColumn(int replicaId) {
+    return replicaId == 0
+      ? HConstants.STARTCODE_QUALIFIER
+      : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
+      + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * Returns the column qualifier for seqNum column for replicaId
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for seqNum column qualifier
+   */
+  private static byte[] getSeqNumColumn(int replicaId) {
+    return replicaId == 0
+      ? HConstants.SEQNUM_QUALIFIER
+      : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
+      + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * Parses the replicaId from the server column qualifier. See top of the class javadoc
+   * for the actual meta layout.
+   * @param serverColumn the column qualifier
+   * @return an int for the replicaId
+   */
+  private static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
+    String serverStr = Bytes.toString(serverColumn);
+
+    Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
+    if (matcher.matches() && matcher.groupCount() > 0) {
+      String group = matcher.group(1);
+      if (group != null && group.length() > 0) {
+        return Integer.parseInt(group.substring(1), 16);
+      } else {
+        return 0;
+      }
+    }
+    return -1;
+  }
 }
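
As an aside, the replica qualifier scheme used by getServerColumn, getStartCodeColumn,
getSeqNumColumn and parseReplicaIdFromServerColumn above can be exercised in isolation. The
following is a minimal, self-contained sketch (not HBase code): it assumes the delimiter is '_'
and that HRegionInfo.REPLICA_ID_FORMAT is the four-hex-digit format implied by the radix-16 parse
in parseReplicaIdFromServerColumn.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ServerColumnSketch {
      // Assumed constants mirroring the patch.
      private static final char DELIMITER = '_';
      private static final String REPLICA_ID_FORMAT = "%04X";
      private static final Pattern SERVER_COLUMN_PATTERN =
          Pattern.compile("^server(_[0-9a-fA-F]{4})?$");

      static String getServerColumn(int replicaId) {
        return replicaId == 0 ? "server"
            : "server" + DELIMITER + String.format(REPLICA_ID_FORMAT, replicaId);
      }

      static int parseReplicaIdFromServerColumn(String serverColumn) {
        Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverColumn);
        if (matcher.matches() && matcher.groupCount() > 0) {
          String group = matcher.group(1);
          // The matched group includes the leading delimiter, hence substring(1);
          // replica 0 has no suffix at all.
          return (group != null && group.length() > 0)
              ? Integer.parseInt(group.substring(1), 16) : 0;
        }
        return -1;
      }

      public static void main(String[] args) {
        System.out.println(getServerColumn(0));                            // server
        System.out.println(getServerColumn(10));                           // server_000A
        System.out.println(parseReplicaIdFromServerColumn("server_000A")); // 10
      }
    }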

http://git-wip-us.apache.org/repos/asf/hbase/blob/752b258b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 5a13ede..9945c40 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -222,6 +222,30 @@ public interface AsyncAdmin {
   CompletableFuture<HTableDescriptor[]> disableTables(Pattern pattern);
 
   /**
+   * @param tableName name of table to check
+   * @return true if table is off-line. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> isTableDisabled(TableName tableName);
+
+  /**
+   * @param tableName name of table to check
+   * @return true if all regions of the table are available. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> isTableAvailable(TableName tableName);
+
+  /**
+   * Use this API to check whether the table has been created with the specified number of split
+   * keys that were supplied when the table was created. Note: if this API is used after any of
+   * the table's regions has been split, it may return false. The return value will be wrapped by
+   * a {@link CompletableFuture}.
+   * @param tableName name of table to check
+   * @param splitKeys the split keys the table is expected to have been created with
+   */
+  CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys);
+
+  /**
    * Get the status of alter command - indicates how many regions have received the updated schema
    * Asynchronous operation.
    * @param tableName TableName instance
@@ -286,6 +310,13 @@ public interface AsyncAdmin {
   CompletableFuture<NamespaceDescriptor[]> listNamespaceDescriptors();
 
   /**
+   * @param tableName name of table to check
+   * @return true if table is on-line. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> isTableEnabled(TableName tableName);
+
+  /**
    * Turn the load balancer on or off.
    * @param on
    * @return Previous balancer value wrapped by a {@link CompletableFuture}.
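
For context, the three new availability checks compose naturally with CompletableFuture chaining.
A minimal usage sketch (assuming an AsyncAdmin instance named admin is already in hand, plus the
usual TableName and CompletableFuture imports; the names here are placeholders, not part of the
patch):

    TableName tn = TableName.valueOf("t1");
    byte[][] splitKeys = { new byte[] { 1 }, new byte[] { 2 } };

    admin.isTableEnabled(tn)
        .thenCompose(enabled -> enabled
            ? admin.isTableAvailable(tn, splitKeys)  // all regions deployed for these split keys?
            : CompletableFuture.completedFuture(false))
        .whenComplete((available, err) -> {
          if (err != null) {
            err.printStackTrace();                   // e.g. wraps TableNotFoundException
          } else {
            System.out.println(tn + " available=" + available);
          }
        });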

http://git-wip-us.apache.org/repos/asf/hbase/blob/752b258b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 5ae30d7..54f0766 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -44,9 +45,9 @@ import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -452,6 +453,112 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
 
 
   @Override
+  public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
+    CompletableFuture<Boolean> future = new CompletableFuture<>();
+    AsyncMetaTableAccessor.getTableState(metaTable, tableName).whenComplete((state, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      if (state.isPresent()) {
+        future.complete(state.get().inStates(TableState.State.ENABLED));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
+    });
+    return future;
+  }
+
+  @Override
+  public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
+    CompletableFuture<Boolean> future = new CompletableFuture<>();
+    AsyncMetaTableAccessor.getTableState(metaTable, tableName).whenComplete((state, error) -> {
+      if (error != null) {
+        future.completeExceptionally(error);
+        return;
+      }
+      if (state.isPresent()) {
+        future.complete(state.get().inStates(TableState.State.DISABLED));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
+    });
+    return future;
+  }
+
+  @Override
+  public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
+    return isTableAvailable(tableName, null);
+  }
+
+  @Override
+  public CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys) {
+    CompletableFuture<Boolean> future = new CompletableFuture<>();
+    isTableEnabled(tableName).whenComplete(
+      (enabled, error) -> {
+        if (error != null) {
+          future.completeExceptionally(error);
+          return;
+        }
+        if (!enabled) {
+          future.complete(false);
+        } else {
+          AsyncMetaTableAccessor.getTableRegionsAndLocations(metaTable, Optional.of(tableName))
+              .whenComplete(
+                (locations, error1) -> {
+                  if (error1 != null) {
+                    future.completeExceptionally(error1);
+                    return;
+                  }
+                  int notDeployed = 0;
+                  int regionCount = 0;
+                  for (Pair<HRegionInfo, ServerName> pair : locations) {
+                    HRegionInfo info = pair.getFirst();
+                    if (pair.getSecond() == null) {
+                      if (LOG.isDebugEnabled()) {
+                        LOG.debug("Table " + tableName + " has not deployed region "
+                            + pair.getFirst().getEncodedName());
+                      }
+                      notDeployed++;
+                    } else if (splitKeys != null
+                        && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+                      for (byte[] splitKey : splitKeys) {
+                        // Just check if the splitkey is available
+                        if (Bytes.equals(info.getStartKey(), splitKey)) {
+                          regionCount++;
+                          break;
+                        }
+                      }
+                    } else {
+                      // Always empty start row should be counted
+                      regionCount++;
+                    }
+                  }
+                  if (notDeployed > 0) {
+                    if (LOG.isDebugEnabled()) {
+                      LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
+                    }
+                    future.complete(false);
+                  } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
+                    if (LOG.isDebugEnabled()) {
+                      LOG.debug("Table " + tableName + " expected to have "
+                          + (splitKeys.length + 1) + " regions, but only " + regionCount
+                          + " available");
+                    }
+                    future.complete(false);
+                  } else {
+                    if (LOG.isDebugEnabled()) {
+                      LOG.debug("Table " + tableName + " should be available");
+                    }
+                    future.complete(true);
+                  }
+                });
+        }
+      });
+    return future;
+  }
+
+  @Override
   public CompletableFuture<Pair<Integer, Integer>> getAlterStatus(TableName tableName) {
     return this
         .<Pair<Integer, Integer>>newMasterCaller()
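
The isTableAvailable implementation above boils down to a counting invariant: every region must be
deployed, and a table created with N split keys must contribute exactly N + 1 matching regions
(the empty start row plus one region starting at each split key). A standalone sketch of just that
invariant (hypothetical helper, not part of the patch):

    // Mirrors the counting in isTableAvailable: a deployed region counts if its
    // start key is empty, or matches one of the supplied split keys.
    static boolean looksAvailable(byte[][] regionStartKeys, byte[][] splitKeys, int notDeployed) {
      if (notDeployed > 0) {
        return false;                      // some region has no location yet
      }
      int regionCount = 0;
      for (byte[] start : regionStartKeys) {
        if (splitKeys != null && start.length > 0) {
          for (byte[] key : splitKeys) {
            if (java.util.Arrays.equals(start, key)) {
              regionCount++;
              break;
            }
          }
        } else {
          regionCount++;                   // empty start row (or no split keys) always counts
        }
      }
      return splitKeys == null || regionCount == splitKeys.length + 1;
    }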

http://git-wip-us.apache.org/repos/asf/hbase/blob/752b258b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 50cd9c6..b7430fe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
+import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -213,7 +216,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
   }
 
   @Test(timeout = 300000)
-  public void testCreateTableWithRegions() throws IOException, InterruptedException {
+  public void testCreateTableWithRegions() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
 
     byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
@@ -225,6 +228,9 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
     admin.createTable(desc, splitKeys).join();
 
+    boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys).get();
+    assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable);
+
     List<HRegionLocation> regions;
     Iterator<HRegionLocation> hris;
     HRegionInfo hri;
@@ -835,4 +841,30 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
     }
   }
 
+  @Test
+  public void testIsTableEnabledAndDisabled() throws Exception {
+    final TableName table = TableName.valueOf("testIsTableEnabledAndDisabled");
+    HTableDescriptor desc = new HTableDescriptor(table);
+    desc.addFamily(new HColumnDescriptor(FAMILY));
+    admin.createTable(desc).join();
+    assertTrue(admin.isTableEnabled(table).get());
+    assertFalse(admin.isTableDisabled(table).get());
+    admin.disableTable(table).join();
+    assertFalse(admin.isTableEnabled(table).get());
+    assertTrue(admin.isTableDisabled(table).get());
+    admin.deleteTable(table).join();
+  }
+
+  @Test
+  public void testTableAvailableWithRandomSplitKeys() throws Exception {
+    TableName tableName = TableName.valueOf("testTableAvailableWithRandomSplitKeys");
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor("col"));
+    byte[][] splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } };
+    admin.createTable(desc).join();
+    boolean tableAvailable = admin.isTableAvailable(tableName, splitKeys).get();
+    assertFalse("Table should be created with 1 row in META", tableAvailable);
+  }
+
 }


[5/9] hbase git commit: HBASE-17844 Subset of HBASE-14614, Procedure v2: Core Assignment Manager (non-critical changes)

Posted by sy...@apache.org.
HBASE-17844 Subset of HBASE-14614, Procedure v2: Core Assignment Manager (non-critical changes)

Minor changes related to HBASE-14614. Added comments. Changed logging.
Added toString formatting. Removed imports. Removed unused code.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d033cbb7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d033cbb7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d033cbb7

Branch: refs/heads/hbase-12439
Commit: d033cbb715aa6153c4b764ef6638b7a1cecee64e
Parents: 752b258
Author: Michael Stack <st...@apache.org>
Authored: Tue Mar 28 17:20:25 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Thu Mar 30 10:31:04 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ClusterStatus.java  | 14 +--
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  2 +-
 .../hadoop/hbase/ipc/AbstractRpcClient.java     |  6 +-
 .../hbase/ipc/ServerTooBusyException.java       |  7 +-
 .../apache/hadoop/hbase/master/RegionState.java |  2 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  2 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java     |  1 -
 .../org/apache/hadoop/hbase/ChoreService.java   |  2 +-
 .../org/apache/hadoop/hbase/HConstants.java     |  2 +-
 .../java/org/apache/hadoop/hbase/TableName.java |  7 +-
 .../procedure2/AbstractProcedureScheduler.java  |  3 -
 .../hadoop/hbase/procedure2/LockAndQueue.java   | 29 ++++--
 .../hadoop/hbase/procedure2/Procedure.java      | 10 ++-
 .../hadoop/hbase/procedure2/ProcedureEvent.java |  6 +-
 .../hbase/procedure2/ProcedureExecutor.java     | 23 +++--
 .../hbase/procedure2/ProcedureScheduler.java    |  4 +-
 .../hbase/procedure2/StateMachineProcedure.java |  3 +
 .../hbase/procedure2/util/DelayedUtil.java      | 54 +++++++----
 .../procedure2/ProcedureTestingUtility.java     | 12 +--
 .../hbase/procedure2/util/TestDelayedUtil.java  |  2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |  2 +-
 .../hbase/ipc/BalancedQueueRpcExecutor.java     |  3 -
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java    | 10 +--
 .../hadoop/hbase/ipc/SimpleRpcServer.java       |  4 +-
 .../hadoop/hbase/master/LoadBalancer.java       |  4 +-
 .../hadoop/hbase/master/MasterWalManager.java   | 11 +--
 .../hadoop/hbase/master/RegionStates.java       |  2 +-
 .../master/balancer/StochasticLoadBalancer.java | 17 +---
 .../hbase/master/locking/LockProcedure.java     | 34 ++++---
 .../AbstractStateMachineTableProcedure.java     |  5 +-
 .../procedure/CreateNamespaceProcedure.java     |  1 -
 .../procedure/MasterProcedureScheduler.java     | 95 +++++++++++++-------
 .../hadoop/hbase/regionserver/HRegion.java      |  5 +-
 .../hadoop/hbase/regionserver/HStore.java       |  7 +-
 .../regionserver/handler/OpenRegionHandler.java |  3 +-
 ...sureAwareCompactionThroughputController.java |  6 +-
 .../apache/hadoop/hbase/wal/WALSplitter.java    |  5 +-
 .../hbase/TestStochasticBalancerJmxMetrics.java |  2 +-
 .../hbase/client/TestMetaWithReplicas.java      |  1 +
 .../hbase/io/encoding/TestChangingEncoding.java |  8 +-
 .../hbase/ipc/TestSimpleRpcScheduler.java       |  3 +-
 .../hbase/master/locking/TestLockProcedure.java |  1 -
 .../regionserver/wal/AbstractTestWALReplay.java |  7 +-
 .../regionserver/wal/TestAsyncLogRolling.java   |  7 +-
 44 files changed, 238 insertions(+), 196 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index f00016d..aed3af4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -19,27 +19,15 @@
 
 package org.apache.hadoop.hbase;
 
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
 import java.util.Set;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.LiveServerInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos.HBaseVersionFileContent;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.util.ByteStringer;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.VersionedWritable;
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index ee8d5fd..15bc132 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -2049,7 +2049,7 @@ public class MetaTableAccessor {
       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
   }
 
-  private static Put addRegionInfo(final Put p, final HRegionInfo hri)
+  public static Put addRegionInfo(final Put p, final HRegionInfo hri)
     throws IOException {
     p.addImmutable(getCatalogFamily(), HConstants.REGIONINFO_QUALIFIER,
       hri.toByteArray());

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 930f37a..d414f70 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -207,12 +207,12 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
     long closeBeforeTime = EnvironmentEdgeManager.currentTime() - minIdleTimeBeforeClose;
     synchronized (connections) {
       for (T conn : connections.values()) {
-        // remove connection if it has not been chosen by anyone for more than maxIdleTime, and the
-        // connection itself has already shutdown. The latter check is because that we may still
+        // Remove connection if it has not been chosen by anyone for more than maxIdleTime, and the
+        // connection itself has already shutdown. The latter check is because we may still
         // have some pending calls on connection so we should not shutdown the connection outside.
         // The connection itself will disconnect if there is no pending call for maxIdleTime.
         if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) {
-          LOG.info("Cleanup idle connection to " + conn.remoteId().address);
+          if (LOG.isTraceEnabled()) LOG.trace("Cleanup idle connection to " + conn.remoteId().address);
           connections.removeValue(conn.remoteId(), conn);
           conn.cleanupConnection();
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
index c6ba030..0dd8e64 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
@@ -25,14 +25,13 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
- * Throw this in rpc call if there are too many pending requests for one region server
+ * Throw this in RPC call if there are too many pending requests for one region server
  */
+@SuppressWarnings("serial")
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class ServerTooBusyException extends DoNotRetryIOException {
-
   public ServerTooBusyException(InetSocketAddress address, long count) {
-    super("There are " + count + " concurrent rpc requests for " + address);
+    super("Busy Server! " + count + " concurrent RPCs against " + address);
   }
-
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
index a930732..0e12ef6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
@@ -166,7 +166,7 @@ public class RegionState {
         state = MERGING_NEW;
         break;
       default:
-        throw new IllegalStateException("");
+        throw new IllegalStateException("Unhandled state " + protoState);
       }
       return state;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 4f68447..fcf2c34 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -1803,7 +1803,7 @@ public final class ProtobufUtil {
    * has a serialized {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data
    * to a ServerName instance.
-   * @throws DeserializationException 
+   * @throws DeserializationException
    */
   public static ServerName toServerName(final byte [] data) throws DeserializationException {
     if (data == null || data.length <= 0) return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index f44979c..e969ded 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -149,7 +149,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
index d4ec48e..19363d0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
@@ -248,7 +248,7 @@ public class ChoreService implements ChoreServicer {
    */
   static class ChoreServiceThreadFactory implements ThreadFactory {
     private final String threadPrefix;
-    private final static String THREAD_NAME_SUFFIX = "_ChoreService_";
+    private final static String THREAD_NAME_SUFFIX = "_Chore_";
     private AtomicInteger threadNumber = new AtomicInteger(1);
 
     /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 609e9a5..3789f71 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -146,7 +146,7 @@ public final class HConstants {
   public static final int DEFAULT_HBASE_BALANCER_PERIOD = 300000;
 
   /** The name of the ensemble table */
-  public static final String ENSEMBLE_TABLE_NAME = "hbase:ensemble";
+  public static final TableName ENSEMBLE_TABLE_NAME = TableName.valueOf("hbase:ensemble");
 
   /** Config for pluggable region normalizer */
   public static final String HBASE_MASTER_NORMALIZER_CLASS =

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index 9b9755b..cba03c0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -89,7 +89,12 @@ public final class TableName implements Comparable<TableName> {
   public static final String OLD_META_STR = ".META.";
   public static final String OLD_ROOT_STR = "-ROOT-";
 
-
+  /**
+   * @return True if <code>tn</code> is the hbase:meta table name.
+   */
+  public static boolean isMetaTableName(final TableName tn) {
+    return tn.equals(TableName.META_TABLE_NAME);
+  }
 
   /**
    * TableName for old -ROOT- table. It is used to read/process old WALs which have

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 646bc1f..fbb066c 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -25,13 +25,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
 public abstract class AbstractProcedureScheduler implements ProcedureScheduler {
   private static final Log LOG = LogFactory.getLog(AbstractProcedureScheduler.class);
-
   private final ReentrantLock schedLock = new ReentrantLock();
   private final Condition schedWaitCond = schedLock.newCondition();
   private boolean running = false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index 19ba28c..e11c23c 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -19,24 +19,25 @@
 package org.apache.hadoop.hbase.procedure2;
 
 /**
- * Locking for mutual exclusion between procedures. Only by procedure framework internally.
+ * Locking for mutual exclusion between procedures. Used only by procedure framework internally.
  * {@link LockAndQueue} has two purposes:
  * <ol>
- *   <li>Acquire/release exclusive/shared locks</li>
- *   <li>Maintain a list of procedures waiting for this lock<br>
- *      To do so, {@link LockAndQueue} extends {@link ProcedureDeque} class. Using inheritance over
- *      composition for this need is unusual, but the choice is motivated by million regions
- *      assignment case as it will reduce memory footprint and number of objects to be GCed.
+ *   <li>Acquire/release exclusive/shared locks.</li>
+ *   <li>Maintains a list of procedures waiting on this lock.
+ *      {@link LockAndQueue} extends {@link ProcedureDeque} class. Blocked Procedures are added
+ *      to our super Deque. Using inheritance over composition to keep the Deque of waiting
+ *      Procedures is unusual, but we do it this way because in certain cases, there will be
+ *      millions of regions. This layout uses less memory.
  * </ol>
  *
- * NOT thread-safe. Needs external concurrency control. For eg. Uses in MasterProcedureScheduler are
+ * <p>NOT thread-safe. Needs external concurrency control: e.g. uses in MasterProcedureScheduler are
  * guarded by schedLock().
  * <br>
  * There is no need of 'volatile' keyword for member variables because of memory synchronization
  * guarantees of locks (see 'Memory Synchronization',
  * http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Lock.html)
  * <br>
- * We do not implement Lock interface because we need exclusive + shared locking, and also
+ * We do not implement Lock interface because we need exclusive and shared locking, and also
  * because try-lock functions require procedure id.
  * <br>
  * We do not use ReentrantReadWriteLock directly because of its high memory overhead.
@@ -104,6 +105,9 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
     return true;
   }
 
+  /**
+   * @return True if we released a lock.
+   */
   public boolean releaseExclusiveLock(final Procedure proc) {
     if (isLockOwner(proc.getProcId())) {
       exclusiveLockProcIdOwner = Long.MIN_VALUE;
@@ -111,4 +115,11 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
     }
     return false;
   }
-}
+
+  @Override
+  public String toString() {
+    return "exclusiveLockOwner=" + (hasExclusiveLock()? getExclusiveLockProcIdOwner(): "NONE") +
+      ", sharedLockCount=" + getSharedLockCount() +
+      ", waitingProcCount=" + size();
+  }
+}
\ No newline at end of file
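
To make the memory argument in the class comment concrete: with composition, every lock carries
its own Deque instance (an extra object header and reference per lock), while inheritance folds
the lock fields and the queue into one object. A schematic comparison (class and field names
hypothetical, not HBase code):

    // Composition: one extra object per lock -- costly with millions of region locks.
    class LockWithQueue {
      long exclusiveLockProcIdOwner;
      java.util.ArrayDeque<Object> waiters = new java.util.ArrayDeque<>();
    }

    // Inheritance, as in LockAndQueue: the lock *is* the deque of waiting procedures,
    // so a million locks means a million fewer Deque instances to allocate and GC.
    class LockIsQueue extends java.util.ArrayDeque<Object> {
      long exclusiveLockProcIdOwner;
    }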

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index fee5250..2a7fa6e 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -253,13 +253,12 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    */
   protected StringBuilder toStringSimpleSB() {
     final StringBuilder sb = new StringBuilder();
-    toStringClassDetails(sb);
 
-    sb.append(", procId=");
+    sb.append("procId=");
     sb.append(getProcId());
 
     if (hasParent()) {
-      sb.append(", parent=");
+      sb.append(", parentProcId=");
       sb.append(getParentProcId());
     }
 
@@ -275,6 +274,9 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
       sb.append(", failed=" + getException());
     }
 
+    sb.append(", ");
+    toStringClassDetails(sb);
+
     return sb;
   }
 
@@ -631,7 +633,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    */
   @InterfaceAudience.Private
   protected synchronized boolean childrenCountDown() {
-    assert childrenLatch > 0;
+    assert childrenLatch > 0: this;
     return --childrenLatch == 0;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java
index cb90ac0..43cce3a 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.procedure2;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Basic ProcedureEvent that contains an "object", which can be a description or a reference to the
@@ -50,6 +49,7 @@ public class ProcedureEvent<T> {
 
   @Override
   public String toString() {
-    return getClass().getSimpleName() + "(" + object + ")";
+    return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() +
+        ", suspended procedures count=" + getSuspendedProcedures().size();
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 0856aa2..e2f63c6 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -360,8 +360,7 @@ public class ProcedureExecutor<TEnvironment> {
       assert !(proc.isFinished() && !proc.hasParent()) : "unexpected completed proc=" + proc;
 
       if (debugEnabled) {
-        LOG.debug(String.format("Loading state=%s isFailed=%s: %s",
-                    proc.getState(), proc.hasException(), proc));
+        LOG.debug(String.format("Loading %s", proc));
       }
 
       Long rootProcId = getRootProcedureId(proc);
@@ -483,7 +482,7 @@ public class ProcedureExecutor<TEnvironment> {
     // We have numThreads executor + one timer thread used for timing out
     // procedures and triggering periodic procedures.
     this.corePoolSize = numThreads;
-    LOG.info("Starting executor threads=" + corePoolSize);
+    LOG.info("Starting executor worker threads=" + corePoolSize);
 
     // Create the Thread Group for the executors
     threadGroup = new ThreadGroup("ProcedureExecutor");
@@ -522,7 +521,9 @@ public class ProcedureExecutor<TEnvironment> {
       store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // Start the executors. Here we must have the lastProcId set.
-    LOG.debug("Start workers " + workerThreads.size());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Start workers " + workerThreads.size());
+    }
     timeoutExecutor.start();
     for (WorkerThread worker: workerThreads) {
       worker.start();
@@ -1147,8 +1148,7 @@ public class ProcedureExecutor<TEnvironment> {
 
       if (proc.isSuccess()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Completed in " +
-              StringUtils.humanTimeDiff(proc.elapsedTime()) + ": " + proc);
+          LOG.debug("Finished " + proc + " in " + StringUtils.humanTimeDiff(proc.elapsedTime()));
         }
         // Finalize the procedure state
         if (proc.getProcId() == rootProcId) {
@@ -1242,8 +1242,7 @@ public class ProcedureExecutor<TEnvironment> {
 
     // Finalize the procedure state
     LOG.info("Rolled back " + rootProc +
-             " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()) +
-             " exception=" + exception.getMessage());
+             " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()));
     procedureFinished(rootProc);
     return LockState.LOCK_ACQUIRED;
   }
@@ -1342,7 +1341,7 @@ public class ProcedureExecutor<TEnvironment> {
         return;
       } catch (Throwable e) {
         // Catch NullPointerExceptions or similar errors...
-        String msg = "CODE-BUG: Uncatched runtime exception for procedure: " + procedure;
+        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
         LOG.error(msg, e);
         procedure.setFailure(new RemoteProcedureException(msg, e));
       }
@@ -1558,7 +1557,7 @@ public class ProcedureExecutor<TEnvironment> {
     private final AtomicLong executionStartTime = new AtomicLong(Long.MAX_VALUE);
 
     public WorkerThread(final ThreadGroup group) {
-      super(group, "ProcedureExecutorWorker-" + workerId.incrementAndGet());
+      super(group, "ProcExecWorker-" + workerId.incrementAndGet());
     }
 
     @Override
@@ -1674,7 +1673,7 @@ public class ProcedureExecutor<TEnvironment> {
         // if the procedure is in a waiting state again, put it back in the queue
         procedure.updateTimestamp();
         if (procedure.isWaiting()) {
-          delayed.setTimeoutTimestamp(procedure.getTimeoutTimestamp());
+          delayed.setTimeout(procedure.getTimeoutTimestamp());
           queue.add(delayed);
         }
       } else {
@@ -1752,7 +1751,7 @@ public class ProcedureExecutor<TEnvironment> {
     }
 
     @Override
-    public long getTimeoutTimestamp() {
+    public long getTimeout() {
       return timeout;
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index 16ff781..617532b 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
@@ -23,13 +23,11 @@ import com.google.common.annotations.VisibleForTesting;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Keep track of the runnable procedures
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
 public interface ProcedureScheduler {
   /**
    * Start the scheduler
@@ -93,7 +91,7 @@ public interface ProcedureScheduler {
   Procedure poll(long timeout, TimeUnit unit);
 
   /**
-   * Mark the event has not ready.
+   * Mark the event as not ready.
    * procedures calling waitEvent() will be suspended.
    * @param event the event to mark as suspended/not ready
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
index 5c3a4c7..ea2a41f 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
@@ -108,6 +108,9 @@ public abstract class StateMachineProcedure<TEnvironment, TState>
     if (aborted.get() && isRollbackSupported(getCurrentState())) {
       setAbortFailure(getClass().getSimpleName(), "abort requested");
     } else {
+      if (aborted.get()) {
+        LOG.warn("ignoring abort request " + state);
+      }
       setNextState(getStateId(state));
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
index ea34c49..cde37bd 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
@@ -32,13 +32,19 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 public final class DelayedUtil {
   private DelayedUtil() { }
 
+  /**
+   * A {@link Delayed} that also exposes its timeout timestamp.
+   */
   public interface DelayedWithTimeout extends Delayed {
-    long getTimeoutTimestamp();
+    long getTimeout();
   }
 
+  /**
+   * POISON implementation; used to mark special state: e.g. shutdown.
+   */
   public static final DelayedWithTimeout DELAYED_POISON = new DelayedWithTimeout() {
     @Override
-    public long getTimeoutTimestamp() {
+    public long getTimeout() {
       return 0;
     }
 
@@ -49,7 +55,7 @@ public final class DelayedUtil {
 
     @Override
     public int compareTo(final Delayed o) {
-      return Long.compare(0, DelayedUtil.getTimeoutTimestamp(o));
+      return Long.compare(0, DelayedUtil.getTimeout(o));
     }
 
     @Override
@@ -63,6 +69,9 @@ public final class DelayedUtil {
     }
   };
 
+  /**
+   * @return null (if an interrupt) or an instance of E; resets interrupt on calling thread.
+   */
   public static <E extends Delayed> E takeWithoutInterrupt(final DelayQueue<E> queue) {
     try {
       return queue.take();
@@ -72,33 +81,42 @@ public final class DelayedUtil {
     }
   }
 
-  public static long getRemainingTime(final TimeUnit resultUnit, final long timeoutTime) {
+  /**
+   * @return Time remaining before <code>timeout</code>, converted to <code>resultUnit</code>.
+   */
+  public static long getRemainingTime(final TimeUnit resultUnit, final long timeout) {
     final long currentTime = EnvironmentEdgeManager.currentTime();
-    if (currentTime >= timeoutTime) {
+    if (currentTime >= timeout) {
       return 0;
     }
-    return resultUnit.convert(timeoutTime - currentTime, TimeUnit.MILLISECONDS);
+    return resultUnit.convert(timeout - currentTime, TimeUnit.MILLISECONDS);
   }
 
   public static int compareDelayed(final Delayed o1, final Delayed o2) {
-    return Long.compare(getTimeoutTimestamp(o1), getTimeoutTimestamp(o2));
+    return Long.compare(getTimeout(o1), getTimeout(o2));
   }
 
-  private static long getTimeoutTimestamp(final Delayed o) {
+  private static long getTimeout(final Delayed o) {
     assert o instanceof DelayedWithTimeout : "expected DelayedWithTimeout instance, got " + o;
-    return ((DelayedWithTimeout)o).getTimeoutTimestamp();
+    return ((DelayedWithTimeout)o).getTimeout();
   }
 
   public static abstract class DelayedObject implements DelayedWithTimeout {
     @Override
     public long getDelay(final TimeUnit unit) {
-      return DelayedUtil.getRemainingTime(unit, getTimeoutTimestamp());
+      return DelayedUtil.getRemainingTime(unit, getTimeout());
     }
 
     @Override
     public int compareTo(final Delayed other) {
       return DelayedUtil.compareDelayed(this, other);
     }
+
+    @Override
+    public String toString() {
+      long timeout = getTimeout();
+      return "timeout=" + timeout + ", delay=" + getDelay(TimeUnit.MILLISECONDS);
+    }
   }
 
   public static abstract class DelayedContainer<T> extends DelayedObject {
@@ -126,25 +144,25 @@ public final class DelayedUtil {
 
     @Override
     public String toString() {
-      return getClass().getSimpleName() + "(" + getObject() + ")";
+      return "containedObject=" + getObject() + ", " + super.toString();
     }
   }
 
   public static class DelayedContainerWithTimestamp<T> extends DelayedContainer<T> {
-    private long timeoutTimestamp;
+    private long timeout;
 
-    public DelayedContainerWithTimestamp(final T object, final long timeoutTimestamp) {
+    public DelayedContainerWithTimestamp(final T object, final long timeout) {
       super(object);
-      setTimeoutTimestamp(timeoutTimestamp);
+      setTimeout(timeout);
     }
 
     @Override
-    public long getTimeoutTimestamp() {
-      return timeoutTimestamp;
+    public long getTimeout() {
+      return timeout;
     }
 
-    public void setTimeoutTimestamp(final long timeoutTimestamp) {
-      this.timeoutTimestamp = timeoutTimestamp;
+    public void setTimeout(final long timeout) {
+      this.timeout = timeout;
     }
   }
 }
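
A quick usage sketch of the renamed API: timeouts are absolute timestamps in milliseconds, and
getDelay() is derived from getTimeout() via getRemainingTime(). Assuming the classes above are on
the classpath (System.currentTimeMillis() stands in here for EnvironmentEdgeManager.currentTime()):

    java.util.concurrent.DelayQueue<DelayedUtil.DelayedContainerWithTimestamp<String>> queue =
        new java.util.concurrent.DelayQueue<>();
    long now = System.currentTimeMillis();
    queue.add(new DelayedUtil.DelayedContainerWithTimestamp<>("proc-1", now + 500));
    queue.add(new DelayedUtil.DelayedContainerWithTimestamp<>("proc-2", now + 100));

    // Blocks until the earliest timeout elapses; returns null if the take was interrupted.
    DelayedUtil.DelayedContainerWithTimestamp<String> first =
        DelayedUtil.takeWithoutInterrupt(queue);
    System.out.println(first == null ? "interrupted" : first.getObject()); // proc-2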

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index 226666f..0240465 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.hbase.procedure2;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.concurrent.Callable;
 import java.util.ArrayList;
 import java.util.Set;
+import java.util.concurrent.Callable;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,18 +39,14 @@ import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.StreamUtils;
+import org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-import org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.Threads;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 public class ProcedureTestingUtility {
   private static final Log LOG = LogFactory.getLog(ProcedureTestingUtility.class);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java
index a2cd70f..019b456 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java
@@ -80,7 +80,7 @@ public class TestDelayedUtil {
     }
 
     @Override
-    public long getTimeoutTimestamp() {
+    public long getTimeout() {
       return 0;
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 30efc0a..5cdfad2 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -138,7 +138,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
             }
           }
         }
-        groupClusterLoad.put(TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME), groupClusterState);
+        groupClusterLoad.put(HConstants.ENSEMBLE_TABLE_NAME, groupClusterState);
         this.internalBalancer.setClusterLoad(groupClusterLoad);
         List<RegionPlan> groupPlans = this.internalBalancer
             .balanceCluster(groupClusterState);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
index f792b36..558c9c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import java.util.List;
 import java.util.concurrent.BlockingQueue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
index d51d83b..2ee2d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -37,8 +35,6 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObserver {
-  private static final Log LOG = LogFactory.getLog(SimpleRpcScheduler.class);
-
   private int port;
   private final PriorityFunction priority;
   private final RpcExecutor callExecutor;
@@ -82,14 +78,14 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
 
     if (callqReadShare > 0) {
       // at least 1 read handler and 1 write handler
-      callExecutor = new RWQueueRpcExecutor("deafult.RWQ", Math.max(2, handlerCount),
+      callExecutor = new RWQueueRpcExecutor("default.RWQ", Math.max(2, handlerCount),
         maxQueueLength, priority, conf, server);
     } else {
       if (RpcExecutor.isFifoQueueType(callQueueType) || RpcExecutor.isCodelQueueType(callQueueType)) {
-        callExecutor = new FastPathBalancedQueueRpcExecutor("deafult.FPBQ", handlerCount,
+        callExecutor = new FastPathBalancedQueueRpcExecutor("default.FPBQ", handlerCount,
             maxQueueLength, priority, conf, server);
       } else {
-        callExecutor = new BalancedQueueRpcExecutor("deafult.BQ", handlerCount, maxQueueLength,
+        callExecutor = new BalancedQueueRpcExecutor("default.BQ", handlerCount, maxQueueLength,
             priority, conf, server);
       }
     }
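
The comment in the RWQ branch above guarantees at least one read and one write
handler; a standalone sketch of that kind of split follows. The arithmetic and
names below are assumptions for illustration, not the RWQueueRpcExecutor internals:

// Illustrative only: divide a handler pool by a read share in (0,1],
// keeping at least one handler on each side.
final class HandlerSplit {
  static int[] split(final int handlerCount, final float readShare) {
    final int total = Math.max(2, handlerCount); // room for 1 reader + 1 writer
    int readers = Math.max(1, Math.round(total * readShare));
    readers = Math.min(readers, total - 1);      // leave at least one writer
    return new int[] { readers, total - readers };
  }
}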

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 5f90d50..c409f6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -1980,8 +1980,8 @@ public class SimpleRpcServer extends RpcServer {
           if (!running) {
             return;
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(Thread.currentThread().getName()+": task running");
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("running");
           }
           try {
             closeIdle(false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 277dcc8..01540b7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -44,7 +44,9 @@ import edu.umd.cs.findbugs.annotations.Nullable;
  * <p>On cluster startup, bulk assignment can be used to determine
  * locations for all Regions in a cluster.
  *
- * <p>This classes produces plans for the {@link AssignmentManager} to execute.
+ * <p>This class produces plans for the
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager}
+ * to execute.
  */
 @InterfaceAudience.Private
 public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index e67af14..105fa29 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -218,9 +219,7 @@ public class MasterWalManager {
   }
 
   public void splitLog(final ServerName serverName) throws IOException {
-    Set<ServerName> serverNames = new HashSet<>();
-    serverNames.add(serverName);
-    splitLog(serverNames);
+    splitLog(Collections.<ServerName>singleton(serverName));
   }
 
   /**
@@ -228,9 +227,7 @@ public class MasterWalManager {
    * @param serverName logs belonging to this server will be split
    */
   public void splitMetaLog(final ServerName serverName) throws IOException {
-    Set<ServerName> serverNames = new HashSet<>();
-    serverNames.add(serverName);
-    splitMetaLog(serverNames);
+    splitMetaLog(Collections.<ServerName>singleton(serverName));
   }
 
   /**
@@ -347,4 +344,4 @@ public class MasterWalManager {
   public RecoveryMode getLogRecoveryMode() {
     return this.splitLogManager.getRecoveryMode();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index a1e24f2..dcbf5a4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -1033,7 +1033,7 @@ public class RegionStates {
     for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
       for (HRegionInfo hri: e.getValue()) {
         if (hri.isMetaRegion()) continue;
-        TableName tablename = bytable ? hri.getTable() : TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME);
+        TableName tablename = bytable ? hri.getTable() : HConstants.ENSEMBLE_TABLE_NAME;
         Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
         if (svrToRegions == null) {
           svrToRegions = new HashMap<>(serverHoldings.size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 59ea067..01058d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -30,7 +30,7 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
@@ -156,23 +157,16 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
   @Override
   public synchronized void setConf(Configuration conf) {
     super.setConf(conf);
-    LOG.info("loading config");
-
     maxSteps = conf.getInt(MAX_STEPS_KEY, maxSteps);
-
     stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
     maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
-
     numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember);
     isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable);
-
     minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance);
-
     if (localityCandidateGenerator == null) {
       localityCandidateGenerator = new LocalityBasedCandidateGenerator(services);
     }
     localityCost = new LocalityCostFunction(conf, services);
-
     if (candidateGenerators == null) {
       candidateGenerators = new CandidateGenerator[] {
           new RandomCandidateGenerator(),
@@ -181,17 +175,14 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
           new RegionReplicaRackCandidateGenerator(),
       };
     }
-
     regionLoadFunctions = new CostFromRegionLoadFunction[] {
       new ReadRequestCostFunction(conf),
       new WriteRequestCostFunction(conf),
       new MemstoreSizeCostFunction(conf),
       new StoreFileCostFunction(conf)
     };
-
     regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf);
     regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf);
-
     costFunctions = new CostFunction[]{
       new RegionCountSkewCostFunction(conf),
       new PrimaryRegionCountSkewCostFunction(conf),
@@ -205,10 +196,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
       regionLoadFunctions[2],
       regionLoadFunctions[3],
     };
-
     curFunctionCosts= new Double[costFunctions.length];
     tempFunctionCosts= new Double[costFunctions.length];
-
+    LOG.info("Loaded config; maxSteps=" + maxSteps + ", stepsPerRegion=" + stepsPerRegion +
+        ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + ", etc.");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index 8e490eb..3cad51c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -43,11 +43,15 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * Procedure to allow clients and external admin tools to take locks on table/namespace/regions.
- * This procedure when scheduled, acquires specified locks, suspends itself and waits for :
- * - call to unlock: if lock request came from the process itself, say master chore.
- * - Timeout : if lock request came from RPC. On timeout, evaluates if it should continue holding
- * the lock or not based on last heartbeat timestamp.
+ * Procedure to allow blessed clients and external admin tools to take our internal Schema locks
+ * used by the procedure framework to isolate procedures doing creates/deletes etc. on
+ * table/namespace/regions.
+ * This procedure when scheduled, acquires specified locks, suspends itself and waits for:
+ * <ul>
+ * <li>Call to unlock: if lock request came from the process itself, say master chore.</li>
+ * <li>Timeout : if lock request came from RPC. On timeout, evaluates if it should continue holding
+ * the lock or not based on last heartbeat timestamp.</li>
+ * </ul>
  */
 @InterfaceAudience.Private
 public final class LockProcedure extends Procedure<MasterProcedureEnv>
@@ -191,7 +195,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   public void updateHeartBeat() {
     lastHeartBeat.set(System.currentTimeMillis());
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Update heartbeat. Proc: " + toString());
+      LOG.debug("Heartbeat " + toString());
     }
   }
 
@@ -202,8 +206,10 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
    */
   protected boolean setTimeoutFailure(final MasterProcedureEnv env) {
     synchronized (event) {
-      if (!event.isReady()) {  // maybe unlock() awakened the event.
+      if (LOG.isDebugEnabled()) LOG.debug("Timeout failure " + this.event);
+      if (!event.isReady()) {  // Maybe unlock() awakened the event.
         setState(ProcedureProtos.ProcedureState.RUNNABLE);
+        if (LOG.isDebugEnabled()) LOG.debug("Calling wake on " + this.event);
         env.getProcedureScheduler().wakeEvent(event);
       }
     }
@@ -234,7 +240,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     }
     if (unlock.get() || hasHeartbeatExpired()) {
       locked.set(false);
-      LOG.debug((unlock.get() ? "UNLOCKED - " : "TIMED OUT - ") + toString());
+      LOG.debug((unlock.get()? "UNLOCKED " : "TIMED OUT ") + toString());
       return null;
     }
     synchronized (event) {
@@ -302,7 +308,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     hasLock = ret;
     if (ret) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("LOCKED - " + toString());
+        LOG.debug("LOCKED " + toString());
       }
       lastHeartBeat.set(System.currentTimeMillis());
       return LockState.LOCK_ACQUIRED;
@@ -352,7 +358,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     } else if (tableName != null) {
       return setupTableLock();
     } else {
-      LOG.error("Unknown level specified in proc - " + toString());
+      LOG.error("Unknown level specified in " + toString());
       throw new IllegalArgumentException("no namespace/table/region provided");
     }
   }
@@ -364,10 +370,10 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
         this.opType = TableOperationType.EDIT;
         return new NamespaceExclusiveLock();
       case SHARED:
-        LOG.error("Shared lock on namespace not supported. Proc - " + toString());
+        LOG.error("Shared lock on namespace not supported for " + toString());
         throw new IllegalArgumentException("Shared lock on namespace not supported");
       default:
-        LOG.error("Unexpected lock type in proc - " + toString());
+        LOG.error("Unexpected lock type " + toString());
         throw new IllegalArgumentException("Wrong lock type: " + type.toString());
     }
   }
@@ -381,7 +387,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
         this.opType = TableOperationType.READ;
         return new TableSharedLock();
       default:
-        LOG.error("Unexpected lock type in proc - " + toString());
+        LOG.error("Unexpected lock type " + toString());
         throw new IllegalArgumentException("Wrong lock type:" + type.toString());
     }
   }
@@ -393,7 +399,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
         this.opType = TableOperationType.REGION_EDIT;
         return new RegionExclusiveLock();
       default:
-        LOG.error("Only exclusive lock supported on regions. Proc - " + toString());
+        LOG.error("Only exclusive lock supported on regions for " + toString());
         throw new IllegalArgumentException("Only exclusive lock supported on regions.");
     }
   }
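
The updated javadoc above describes the heartbeat contract for RPC-originated
locks; a minimal standalone sketch of that expiry check (class and field names
are assumptions, not the LockProcedure internals):

import java.util.concurrent.atomic.AtomicLong;

// Illustrative heartbeat-kept lock: the holder pings periodically; on each
// timeout wakeup the owner asks whether the holder has gone away.
class HeartbeatLock {
  private final long heartbeatTimeoutMs;
  private final AtomicLong lastHeartbeat = new AtomicLong();

  HeartbeatLock(final long heartbeatTimeoutMs) {
    this.heartbeatTimeoutMs = heartbeatTimeoutMs;
    updateHeartbeat();
  }

  // The lock holder calls this periodically to keep the lock alive.
  void updateHeartbeat() {
    lastHeartbeat.set(System.currentTimeMillis());
  }

  // On timeout, decide whether to keep holding based on the last heartbeat.
  boolean hasHeartbeatExpired() {
    return System.currentTimeMillis() - lastHeartbeat.get() > heartbeatTimeoutMs;
  }
}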

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index e957f9d..9f23848 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -69,9 +69,8 @@ public abstract class AbstractStateMachineTableProcedure<TState>
   @Override
   public void toStringClassDetails(final StringBuilder sb) {
     sb.append(getClass().getSimpleName());
-    sb.append(" (table=");
+    sb.append(" table=");
     sb.append(getTableName());
-    sb.append(")");
   }
 
   @Override
@@ -111,4 +110,4 @@ public abstract class AbstractStateMachineTableProcedure<TState>
       throw new TableNotFoundException(getTableName());
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
index 2c39c09..7d65126 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -62,7 +62,6 @@ public class CreateNamespaceProcedure
     if (isTraceEnabled()) {
       LOG.trace(this + " execute state=" + state);
     }
-
     try {
       switch (state) {
       case CREATE_NAMESPACE_PREPARE:

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index ebf79fa..48a0b62 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
@@ -33,7 +34,6 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
 import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
 import org.apache.hadoop.hbase.procedure2.LockStatus;
@@ -51,52 +51,51 @@ import org.apache.hadoop.hbase.util.AvlUtil.AvlTreeIterator;
  * This ProcedureScheduler tries to provide to the ProcedureExecutor procedures
  * that can be executed without having to wait on a lock.
  * Most of the master operations can be executed concurrently, if they
- * are operating on different tables (e.g. two create table can be performed
- * at the same, time assuming table A and table B) or against two different servers; say
- * two servers that crashed at about the same time.
+ * are operating on different tables (e.g. two create table procedures can be performed
+ * at the same time) or against two different servers; say two servers that crashed at
+ * about the same time.
  *
- * <p>Each procedure should implement an interface providing information for this queue.
- * for example table related procedures should implement TableProcedureInterface.
- * each procedure will be pushed in its own queue, and based on the operation type
- * we may take smarter decision. e.g. we can abort all the operations preceding
+ * <p>Each procedure should implement an Interface providing information for this queue.
+ * For example table related procedures should implement TableProcedureInterface.
+ * Each procedure will be pushed in its own queue, and based on the operation type
+ * we may make smarter decisions: e.g. we can abort all the operations preceding
  * a delete table, or similar.
  *
  * <h4>Concurrency control</h4>
  * Concurrent access to member variables (tableRunQueue, serverRunQueue, locking, tableMap,
- * serverBuckets) is controlled by schedLock(). That mainly includes:<br>
+ * serverBuckets) is controlled by schedLock(). This mainly includes:<br>
  * <ul>
  *   <li>
- *     {@link #push(Procedure, boolean, boolean)} : A push will add a Queue back to run-queue
+ *     {@link #push(Procedure, boolean, boolean)}: A push will add a Queue back to run-queue
  *     when:
  *     <ol>
- *       <li>queue was empty before push (so must have been out of run-queue)</li>
- *       <li>child procedure is added (which means parent procedure holds exclusive lock, and it
+ *       <li>Queue was empty before push (so must have been out of run-queue)</li>
+ *       <li>Child procedure is added (which means parent procedure holds exclusive lock, and it
  *           must have moved Queue out of run-queue)</li>
  *     </ol>
  *   </li>
  *   <li>
- *     {@link #poll(long)} : A poll will remove a Queue from run-queue when:
+ *     {@link #poll(long)}: A poll will remove a Queue from run-queue when:
  *     <ol>
- *       <li>queue becomes empty after poll</li>
- *       <li>exclusive lock is requested by polled procedure and lock is available (returns the
+ *       <li>Queue becomes empty after poll</li>
+ *       <li>Exclusive lock is requested by polled procedure and lock is available (returns the
  *           procedure)</li>
- *       <li>exclusive lock is requested but lock is not available (returns null)</li>
- *       <li>Polled procedure is child of parent holding exclusive lock, and the next procedure is
+ *       <li>Exclusive lock is requested but lock is not available (returns null)</li>
+ *       <li>Polled procedure is child of parent holding exclusive lock and the next procedure is
  *           not a child</li>
  *     </ol>
  *   </li>
  *   <li>
- *     namespace/table/region locks: Queue is added back to run-queue when lock being released is:
+ *     Namespace/table/region locks: Queue is added back to run-queue when lock being released is:
  *     <ol>
- *       <li>exclusive lock</li>
- *       <li>last shared lock (in case queue was removed because next procedure in queue required
+ *       <li>Exclusive lock</li>
+ *       <li>Last shared lock (in case queue was removed because next procedure in queue required
  *           exclusive lock)</li>
  *     </ol>
  *   </li>
  * </ul>
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
 public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   private static final Log LOG = LogFactory.getLog(MasterProcedureScheduler.class);
 
@@ -118,16 +117,16 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    * TableQueue with priority 1.
    */
   private static class TablePriorities {
+    final int metaTablePriority;
+    final int userTablePriority;
+    final int sysTablePriority;
+
     TablePriorities(Configuration conf) {
       metaTablePriority = conf.getInt("hbase.master.procedure.queue.meta.table.priority", 3);
       sysTablePriority = conf.getInt("hbase.master.procedure.queue.system.table.priority", 2);
       userTablePriority = conf.getInt("hbase.master.procedure.queue.user.table.priority", 1);
     }
 
-    final int metaTablePriority;
-    final int userTablePriority;
-    final int sysTablePriority;
-
     int getPriority(TableName tableName) {
       if (tableName.equals(TableName.META_TABLE_NAME)) {
         return metaTablePriority;
@@ -773,7 +772,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
           locking.getTableLock(TableName.NAMESPACE_TABLE_NAME);
       namespaceLock.releaseExclusiveLock(procedure);
       int waitingCount = 0;
-      if(systemNamespaceTableLock.releaseSharedLock()) {
+      if (systemNamespaceTableLock.releaseSharedLock()) {
         addToRunQueue(tableRunQueue, getTableQueue(TableName.NAMESPACE_TABLE_NAME));
         waitingCount += wakeWaitingProcedures(systemNamespaceTableLock);
       }
@@ -924,6 +923,12 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    * locks.
    */
   private static class SchemaLocking {
+    final Map<ServerName, LockAndQueue> serverLocks = new HashMap<>();
+    final Map<String, LockAndQueue> namespaceLocks = new HashMap<>();
+    final Map<TableName, LockAndQueue> tableLocks = new HashMap<>();
+    // Single map for all regions irrespective of tables. Key is encoded region name.
+    final Map<String, LockAndQueue> regionLocks = new HashMap<>();
+
     private <T> LockAndQueue getLock(Map<T, LockAndQueue> map, T key) {
       LockAndQueue lock = map.get(key);
       if (lock == null) {
@@ -969,11 +974,29 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       regionLocks.clear();
     }
 
-    final Map<ServerName, LockAndQueue> serverLocks = new HashMap<>();
-    final Map<String, LockAndQueue> namespaceLocks = new HashMap<>();
-    final Map<TableName, LockAndQueue> tableLocks = new HashMap<>();
-    // Single map for all regions irrespective of tables. Key is encoded region name.
-    final Map<String, LockAndQueue> regionLocks = new HashMap<>();
+    @Override
+    public String toString() {
+      return "serverLocks=" + filterUnlocked(this.serverLocks) +
+        ", namespaceLocks=" + filterUnlocked(this.namespaceLocks) +
+        ", tableLocks=" + filterUnlocked(this.tableLocks) +
+        ", regionLocks=" + filterUnlocked(this.regionLocks);
+    }
+
+    private String filterUnlocked(Map<?, LockAndQueue> locks) {
+      StringBuilder sb = new StringBuilder("{");
+      int initialLength = sb.length();
+      for (Map.Entry<?, LockAndQueue> entry: locks.entrySet()) {
+        if (!entry.getValue().isLocked()) continue;
+        if (sb.length() > initialLength) sb.append(", ");
+          sb.append("{");
+          sb.append(entry.getKey());
+          sb.append("=");
+          sb.append(entry.getValue());
+          sb.append("}");
+        }
+        sb.append("}");
+        return sb.toString();
+     }
   }
 
   // ======================================================================
@@ -1057,4 +1080,14 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return Math.max(1, queue.getPriority() * quantum); // TODO
     }
   }
+
+  /**
+   * For debugging. Expensive.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public String dumpLocks() throws IOException {
+    // TODO: Refactor so we stream out locks for case when millions; i.e. take a PrintWriter
+    return this.locking.toString();
+  }
 }
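
The SchemaLocking changes above lazily create per-key lock state and, for
toString(), print only the held locks; the same shape in a standalone sketch
(LockState stands in for LockAndQueue; illustrative only, not the HBase class):

import java.util.HashMap;
import java.util.Map;

class LockRegistry<K> {
  static final class LockState {
    boolean locked;
    @Override public String toString() { return "locked=" + locked; }
  }

  private final Map<K, LockState> locks = new HashMap<>();

  // Create per-key lock state on first access, like SchemaLocking.getLock.
  LockState getLock(final K key) {
    return locks.computeIfAbsent(key, k -> new LockState());
  }

  // Render held locks only, mirroring filterUnlocked above.
  String dumpHeldLocks() {
    final StringBuilder sb = new StringBuilder("{");
    for (Map.Entry<K, LockState> e : locks.entrySet()) {
      if (!e.getValue().locked) continue;
      if (sb.length() > 1) sb.append(", ");
      sb.append(e.getKey()).append("=").append(e.getValue());
    }
    return sb.append("}").toString();
  }
}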

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8deb9f1..7f889ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -287,7 +287,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   protected final Configuration conf;
   private final Configuration baseConf;
   private final int rowLockWaitDuration;
-  private CompactedHFilesDischarger compactedFileDischarger;
   static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 
   // The internal wait duration to acquire a lock before read/update
@@ -1703,8 +1702,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       if (this.metricsRegionWrapper != null) {
         Closeables.closeQuietly(this.metricsRegionWrapper);
       }
-      // stop the Compacted hfile discharger
-      if (this.compactedFileDischarger != null) this.compactedFileDischarger.cancel(true);
       status.markComplete("Closed");
       LOG.info("Closed " + this);
       return result;
@@ -7612,7 +7609,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT +
       ClassSize.ARRAY +
-      50 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
+      49 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
       (14 * Bytes.SIZEOF_LONG) +
       6 * Bytes.SIZEOF_BOOLEAN);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index a988c5b..a98f89e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1298,10 +1298,9 @@ public class HStore implements Store {
       }
 
       // Ready to go. Have list of files to compact.
-      LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in "
-          + this + " of " + this.getRegionInfo().getRegionNameAsString()
-          + " into tmpdir=" + fs.getTempDir() + ", totalSize="
-          + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
+      LOG.info("Starting compaction of " + filesToCompact +
+        " into tmpdir=" + fs.getTempDir() + ", totalSize=" +
+          TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
 
       // Commence the compaction.
       List<Path> newFiles = compaction.compact(throughputController, user);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index 5bd2d44..8369100 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -123,8 +123,7 @@ public class OpenRegionHandler extends EventHandler {
       openSuccessful = true;
 
       // Done!  Successful region open
-      LOG.debug("Opened " + regionName + " on " +
-        this.server.getServerName());
+      LOG.debug("Opened " + regionName + " on " + this.server.getServerName());
     } finally {
       // Do all clean up here
       if (!openSuccessful) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
index c0d3b74..ebb83f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
@@ -99,8 +99,10 @@ public class PressureAwareCompactionThroughputController extends PressureAwareTh
           maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound)
               * compactionPressure;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("compactionPressure is " + compactionPressure + ", tune compaction throughput to "
+    if (LOG.isTraceEnabled()) {
+      // TODO: FIX!!! Don't log unless some activity or a change in config. Making TRACE
+      // in the meantime.
+      LOG.trace("CompactionPressure is " + compactionPressure + ", tune throughput to "
           + throughputDesc(maxThroughputToSet));
     }
     this.setMaxThroughput(maxThroughputToSet);
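
The tuning line in this hunk linearly interpolates the throughput cap between
the configured lower and upper bounds by a pressure value in [0,1]; for example,
with bounds of 50 MB/s and 100 MB/s and pressure 0.5 the target is 75 MB/s.
As a standalone sketch (values illustrative):

final class ThroughputTuning {
  // Same linear interpolation as the hunk above.
  static double tune(final double lower, final double upper, final double pressure) {
    return lower + (upper - lower) * pressure;
  }
  // ThroughputTuning.tune(50e6, 100e6, 0.5) == 7.5e7 bytes/sec
}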

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index d87c71b..77c2d1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -695,7 +695,8 @@ public class WALSplitter {
    */
   public static long writeRegionSequenceIdFile(final FileSystem fs, final Path regiondir,
       long newSeqId, long saftyBumper) throws IOException {
-
+    // TODO: Why are we using a method in here as part of our normal region open where
+    // there is no splitting involved? Fix. St.Ack 01/20/2017.
     Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
     long maxSeqId = 0;
     FileStatus[] files = null;
@@ -732,7 +733,7 @@ public class WALSplitter {
           throw new IOException("Failed to create SeqId file:" + newSeqIdFile);
         }
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Wrote region seqId=" + newSeqIdFile + " to file, newSeqId=" + newSeqId
+          LOG.debug("Wrote file=" + newSeqIdFile + ", newSeqId=" + newSeqId
               + ", maxSeqId=" + maxSeqId);
         }
       } catch (FileAlreadyExistsException ignored) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java
index c4abd89..7ad99c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java
@@ -127,7 +127,7 @@ public class TestStochasticBalancerJmxMetrics extends BalancerTestBase {
     conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false);
     loadBalancer.setConf(conf);
 
-    TableName tableName = TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME);
+    TableName tableName = HConstants.ENSEMBLE_TABLE_NAME;
     Map<ServerName, List<HRegionInfo>> clusterState = mockClusterServers(mockCluster_ensemble);
     loadBalancer.balanceCluster(tableName, clusterState);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 527c910..a700ebe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.HBaseFsckRepair;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
index 97f74af..f49fd75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
@@ -150,6 +150,9 @@ public class TestChangingEncoding {
       Result result = table.get(get);
       for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
         Cell kv = result.getColumnLatestCell(CF_BYTES, getQualifier(j));
+        if (kv == null) {
+          continue;
+        }
         assertTrue(CellUtil.matchingValue(kv, getValue(batchId, i, j)));
       }
     }
@@ -238,7 +241,7 @@ public class TestChangingEncoding {
   public void testCrazyRandomChanges() throws Exception {
     prepareTest("RandomChanges");
     Random rand = new Random(2934298742974297L);
-    for (int i = 0; i < 20; ++i) {
+    for (int i = 0; i < 10; ++i) {
       int encodingOrdinal = rand.nextInt(DataBlockEncoding.values().length);
       DataBlockEncoding encoding = DataBlockEncoding.values()[encodingOrdinal];
       setEncodingConf(encoding, rand.nextBoolean());
@@ -246,5 +249,4 @@ public class TestChangingEncoding {
       verifyAllData();
     }
   }
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 5e4520d..66b77cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -434,7 +434,7 @@ public class TestSimpleRpcScheduler {/*
   @Test
   public void testCoDelScheduling() throws Exception {
     CoDelEnvironmentEdge envEdge = new CoDelEnvironmentEdge();
-    envEdge.threadNamePrefixs.add("RpcServer.deafult.FPBQ.Codel.handler");
+    envEdge.threadNamePrefixs.add("RpcServer.default.FPBQ.Codel.handler");
     Configuration schedConf = HBaseConfiguration.create();
     schedConf.setInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 250);
     schedConf.set(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY,
@@ -456,7 +456,6 @@ public class TestSimpleRpcScheduler {/*
       for (int i = 0; i < 100; i++) {
         long time = System.currentTimeMillis();
         envEdge.timeQ.put(time);
-        long now = System.currentTimeMillis();
         CallRunner cr = getMockedCallRunner(time, 2);
         // LOG.info("" + i + " " + (System.currentTimeMillis() - now) + " cr=" + cr);
         scheduler.dispatch(cr);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
index f09ac07..d2a2c93 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
@@ -76,7 +76,6 @@ public class TestLockProcedure {
   // crank this up if this test turns out to be flaky.
   private static final int HEARTBEAT_TIMEOUT = 1000;
   private static final int LOCAL_LOCKS_TIMEOUT = 2000;
-  private static final int ZK_EXPIRATION = 2 * HEARTBEAT_TIMEOUT;
 
   private static final Log LOG = LogFactory.getLog(TestLockProcedure.class);
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index e5e7c83..be725fe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -207,10 +207,9 @@ public abstract class AbstractTestWALReplay {
 
     // move region to another regionserver
     Region destRegion = regions.get(0);
-    int originServerNum = hbaseCluster
-        .getServerWith(destRegion.getRegionInfo().getRegionName());
-    assertTrue("Please start more than 1 regionserver", hbaseCluster
-        .getRegionServerThreads().size() > 1);
+    int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
+    assertTrue("Please start more than 1 regionserver",
+        hbaseCluster.getRegionServerThreads().size() > 1);
     int destServerNum = 0;
     while (destServerNum == originServerNum) {
       destServerNum++;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d033cbb7/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
index fabf6d2..5f0c81e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Table;
@@ -31,11 +32,15 @@ import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 
 @Category({ VerySlowRegionServerTests.class, LargeTests.class })
 public class TestAsyncLogRolling extends AbstractTestLogRolling {
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+      withLookingForStuckThread(true).build();
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -62,4 +67,4 @@ public class TestAsyncLogRolling extends AbstractTestLogRolling {
     doPut(table, 2);
     assertEquals(numRolledLogFiles + 1, AsyncFSWALProvider.getNumRolledLogFiles(wal));
   }
-}
+}
\ No newline at end of file


[8/9] hbase git commit: HBASE-16780 Since move to protobuf3.1, Cells are limited to 64MB where previously they had no limit. Update internal pb to 3.2 from 3.1.

Posted by sy...@apache.org.
HBASE-16780 Since move to protobuf3.1, Cells are limited to 64MB where previously they had no limit. Update internal pb to 3.2 from 3.1.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7700a7fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7700a7fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7700a7fa

Branch: refs/heads/hbase-12439
Commit: 7700a7fac1262934fe538a96b040793c6ff171ce
Parents: a9682ca
Author: Michael Stack <st...@apache.org>
Authored: Mon Mar 27 07:43:22 2017 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Fri Mar 31 12:44:59 2017 -0700

----------------------------------------------------------------------
 hbase-protocol-shaded/pom.xml | 2 +-
 hbase-server/pom.xml          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7700a7fa/hbase-protocol-shaded/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index 6c0e971..a6b8777 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -33,7 +33,7 @@
     <maven.javadoc.skip>true</maven.javadoc.skip>
     <!--Version of protobuf that hbase uses internally (we shade our pb)
            -->
-    <internal.protobuf.version>3.1.0</internal.protobuf.version>
+    <internal.protobuf.version>3.2.0</internal.protobuf.version>
     <!--The Default target dir-->
     <classes.dir>${project.build.directory}/classes</classes.dir>
     <!--The Default location for sources-->

http://git-wip-us.apache.org/repos/asf/hbase/blob/7700a7fa/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 1eba784..10093cb 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -28,7 +28,7 @@
   </parent>
   <artifactId>hbase-server</artifactId>
   <name>Apache HBase - Server</name>
-  <description>Main functionality for HBase</description>
+  <description>Server functionality for HBase</description>
   <properties>
     <test.build.webapps>target/test-classes/webapps</test.build.webapps>
     <license.bundles.logo>true</license.bundles.logo>
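
For background on the 64MB limit mentioned in the commit message: protobuf-java
enforces a per-stream message size limit when parsing, and that limit can also
be raised explicitly on a CodedInputStream. A sketch against the stock
(unshaded) protobuf-java API, shown for illustration only; HBase relies on the
shaded 3.2.0 bump above rather than a call site like this:

import com.google.protobuf.CodedInputStream;
import java.io.InputStream;

final class PbLimits {
  // Build a parser stream with the default size cap lifted.
  static CodedInputStream unlimited(final InputStream in) {
    final CodedInputStream cis = CodedInputStream.newInstance(in);
    cis.setSizeLimit(Integer.MAX_VALUE); // default cap was 64MB in older releases
    return cis;
  }
}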


[3/9] hbase git commit: HBASE-17847 update docs to cover guidance on recent Hadoop releases.

Posted by sy...@apache.org.
HBASE-17847 update docs to cover guidance on recent Hadoop releases.

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b290d14e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b290d14e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b290d14e

Branch: refs/heads/hbase-12439
Commit: b290d14e1f7fffa3c06bb19770b53094c3d2459a
Parents: f159557
Author: Sean Busbey <bu...@apache.org>
Authored: Wed Mar 29 08:38:07 2017 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Wed Mar 29 15:20:09 2017 -0500

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/configuration.adoc | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b290d14e/src/main/asciidoc/_chapters/configuration.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index d189c9f..b6b6c15 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -215,6 +215,8 @@ See link:http://wiki.apache.org/hadoop/Distributions%20and%20Commercial%20Suppor
 Hadoop 2.x is faster and includes features, such as short-circuit reads, which will help improve your HBase random read profile.
 Hadoop 2.x also includes important bug fixes that will improve your overall HBase experience.
 HBase 0.98 drops support for Hadoop 1.0, deprecates use of Hadoop 1.1+, and HBase 1.0 will not support Hadoop 1.x.
+
+Hadoop 3.x is still in early access releases and has not yet been sufficiently tested by the HBase community for production use cases.
 ====
 
 Use the following legend to interpret this table:
@@ -241,6 +243,8 @@ Use the following legend to interpret this table:
 |Hadoop-2.6.1+ | NT | NT | NT | NT | S | S | S
 |Hadoop-2.7.0 | X | X | X | X | X | X | X
 |Hadoop-2.7.1+ | NT | NT | NT | NT | S | S | S
+|Hadoop-2.8.0 | X | X | X | X | X | X | X
+|Hadoop-3.0.0-alphax | NT | NT | NT | NT | NT | NT | NT
 |===
 
 .Hadoop Pre-2.6.1 and JDK 1.8 Kerberos
@@ -264,7 +268,13 @@ data loss. This patch is present in Apache Hadoop releases 2.6.1+.
 .Hadoop 2.7.x
 [TIP]
 ====
-Hadoop version 2.7.0 is not tested or supported as the Hadoop PMC has explicitly labeled that release as not being stable.
+Hadoop version 2.7.0 is not tested or supported as the Hadoop PMC has explicitly labeled that release as not being stable. (See the link:https://s.apache.org/hadoop-2.7.0-announcement[announcement of Apache Hadoop 2.7.0].)
+====
+
+.Hadoop 2.8.x
+[TIP]
+====
+Hadoop version 2.8.0 is not tested or supported as the Hadoop PMC has explicitly labeled that release as not being stable. (See the link:https://s.apache.org/hadoop-2.8.0-announcement[announcement of Apache Hadoop 2.8.0].)
 ====
 
 .Replace the Hadoop Bundled With HBase!