Posted to commits@hbase.apache.org by st...@apache.org on 2014/11/25 17:20:02 UTC

[4/6] hbase git commit: HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with HConnection#getTable (0.98, 0.99)
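
The pattern being rolled out across these files: stop constructing HTable directly from a Configuration and instead borrow a Table from a long-lived Connection. A minimal before/after sketch (illustrative only, not part of the patch; the table name and cell coordinates are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("t1");  // hypothetical table
        // Before: HTable table = new HTable(conf, tn);  // per-instance, implicitly managed connection
        // After: one shared Connection; Table handles are cheap and short-lived.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tn)) {
          Put p = new Put(Bytes.toBytes("row1"));
          p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(p);
        }
      }
    }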

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index 66c45a4..0c67154 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -28,19 +28,21 @@ import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableDescriptor;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -48,10 +50,10 @@ import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
 /**
  * Base class for performing operations against tables.
@@ -130,7 +132,7 @@ public abstract class TableEventHandler extends EventHandler {
       if (TableName.META_TABLE_NAME.equals(tableName)) {
         hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper());
       } else {
-        hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName);
+        hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName);
       }
       handleTableOperation(hris);
       if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
@@ -175,32 +177,32 @@ public abstract class TableEventHandler extends EventHandler {
   public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
     boolean done = false;
     LOG.info("Bucketing regions by region server...");
-    HTable table = new HTable(masterServices.getConfiguration(), tableName);
-    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps
-        .newTreeMap();
-    NavigableMap<HRegionInfo, ServerName> hriHserverMapping;
-    try {
-      hriHserverMapping = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regionLocations = null;
+    Connection connection = this.masterServices.getConnection();
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      regionLocations = locator.getAllRegionLocations();
     }
-
+    // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
+    NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
+    for (HRegionLocation location: regionLocations) {
+      hri2Sn.put(location.getRegionInfo(), location.getServerName());
+    }
+    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
     List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
     for (HRegionInfo hri : regions) {
-      ServerName rsLocation = hriHserverMapping.get(hri);
-
+      ServerName sn = hri2Sn.get(hri);
       // Skip the offlined split parent region
       // See HBASE-4578 for more information.
-      if (null == rsLocation) {
+      if (null == sn) {
         LOG.info("Skip " + hri);
         continue;
       }
-      if (!serverToRegions.containsKey(rsLocation)) {
+      if (!serverToRegions.containsKey(sn)) {
         LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
-        serverToRegions.put(rsLocation, hriList);
+        serverToRegions.put(sn, hriList);
       }
       reRegions.add(hri);
-      serverToRegions.get(rsLocation).add(hri);
+      serverToRegions.get(sn).add(hri);
     }
 
     LOG.info("Reopening " + reRegions.size() + " regions on "

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
index c5790e1..893fd37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
@@ -125,7 +125,7 @@ public class TruncateTableHandler extends DeleteTableHandler {
     }
 
     // 4. Add regions to META
-    MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(),
+    MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(),
       regionInfos);
 
     // 5. Trigger immediate assignment of the regions in round-robin fashion

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index 16d305e..c9fc93b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
@@ -141,7 +141,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
   protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
       throws IOException {
     super.addRegionsToMeta(regionInfos);
-    metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), regionInfos);
+    metaChanges.updateMetaParentRegions(this.server.getConnection(), regionInfos);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index af4eb09..efafaf9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -155,7 +155,7 @@ public final class MasterSnapshotVerifier {
     if (TableName.META_TABLE_NAME.equals(tableName)) {
       regions = new MetaTableLocator().getMetaRegions(services.getZooKeeper());
     } else {
-      regions = MetaTableAccessor.getTableRegions(services.getShortCircuitConnection(), tableName);
+      regions = MetaTableAccessor.getTableRegions(services.getConnection(), tableName);
     }
     // Remove the non-default regions
     RegionReplicaUtil.removeNonDefaultRegions(regions);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index ff074e8..57895e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
     MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
-    Connection conn = masterServices.getShortCircuitConnection();
+    Connection conn = masterServices.getConnection();
     FileSystem fs = fileSystemManager.getFileSystem();
     Path rootDir = fileSystemManager.getRootDir();
     TableName tableName = hTableDescriptor.getTableName();
@@ -163,7 +163,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
       if (metaChanges.hasRegionsToRestore()) {
         MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore());
       }
-      metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), hris);
+      metaChanges.updateMetaParentRegions(this.server.getConnection(), hris);
 
       // At this point the restore is complete. Next step is enabling the table.
       LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 65c7670..d2e11a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -721,7 +721,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
 
     // Execute the restore/clone operation
-    if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) {
+    if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
       if (master.getTableStateManager().isTableState(
           TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
         throw new UnsupportedOperationException("Table '" +

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 94275c8..5fd4aaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -174,7 +174,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
           server.getZooKeeper());
       } else {
         regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
-          server.getShortCircuitConnection(), snapshotTable, false);
+          server.getConnection(), snapshotTable, false);
       }
 
       // run the snapshot

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index bc248b9..3b0f1e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -132,7 +132,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
           master.getZooKeeper());
       } else {
         regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
-          master.getShortCircuitConnection(), tableName, false);
+          master.getConnection(), tableName, false);
       }
     } catch (InterruptedException e1) {
       String msg = "Failed to get regions for '" + desc.getInstance() + "'";

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 885862c..6a57156 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -23,31 +23,21 @@ import java.util.HashSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
-import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
-import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType;
-import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
 
 /**
  * Master Quota Manager.
@@ -80,7 +70,7 @@ public class MasterQuotaManager {
     }
 
     // Create the quota table if missing
-    if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(),
+    if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
           QuotaUtil.QUOTA_TABLE_NAME)) {
       LOG.info("Quota table not found. Creating...");
       createQuotaTable();
@@ -101,10 +91,6 @@ public class MasterQuotaManager {
     return enabled;
   }
 
-  private Configuration getConfiguration() {
-    return masterServices.getConfiguration();
-  }
-
   /* ==========================================================================
    *  Admin operations to manage the quota table
    */
@@ -152,15 +138,15 @@ public class MasterQuotaManager {
     setQuota(req, new SetQuotaOperations() {
       @Override
       public Quotas fetch() throws IOException {
-        return QuotaUtil.getUserQuota(getConfiguration(), userName);
+        return QuotaUtil.getUserQuota(masterServices.getConnection(), userName);
       }
       @Override
       public void update(final Quotas quotas) throws IOException {
-        QuotaUtil.addUserQuota(getConfiguration(), userName, quotas);
+        QuotaUtil.addUserQuota(masterServices.getConnection(), userName, quotas);
       }
       @Override
       public void delete() throws IOException {
-        QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName);
+        QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName);
       }
       @Override
       public void preApply(final Quotas quotas) throws IOException {
@@ -178,15 +164,15 @@ public class MasterQuotaManager {
     setQuota(req, new SetQuotaOperations() {
       @Override
       public Quotas fetch() throws IOException {
-        return QuotaUtil.getUserQuota(getConfiguration(), userName, table);
+        return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, table);
       }
       @Override
       public void update(final Quotas quotas) throws IOException {
-        QuotaUtil.addUserQuota(getConfiguration(), userName, table, quotas);
+        QuotaUtil.addUserQuota(masterServices.getConnection(), userName, table, quotas);
       }
       @Override
       public void delete() throws IOException {
-        QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, table);
+        QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, table);
       }
       @Override
       public void preApply(final Quotas quotas) throws IOException {
@@ -204,15 +190,15 @@ public class MasterQuotaManager {
     setQuota(req, new SetQuotaOperations() {
       @Override
       public Quotas fetch() throws IOException {
-        return QuotaUtil.getUserQuota(getConfiguration(), userName, namespace);
+        return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, namespace);
       }
       @Override
       public void update(final Quotas quotas) throws IOException {
-        QuotaUtil.addUserQuota(getConfiguration(), userName, namespace, quotas);
+        QuotaUtil.addUserQuota(masterServices.getConnection(), userName, namespace, quotas);
       }
       @Override
       public void delete() throws IOException {
-        QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, namespace);
+        QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, namespace);
       }
       @Override
       public void preApply(final Quotas quotas) throws IOException {
@@ -230,15 +216,15 @@ public class MasterQuotaManager {
     setQuota(req, new SetQuotaOperations() {
       @Override
       public Quotas fetch() throws IOException {
-        return QuotaUtil.getTableQuota(getConfiguration(), table);
+        return QuotaUtil.getTableQuota(masterServices.getConnection(), table);
       }
       @Override
       public void update(final Quotas quotas) throws IOException {
-        QuotaUtil.addTableQuota(getConfiguration(), table, quotas);
+        QuotaUtil.addTableQuota(masterServices.getConnection(), table, quotas);
       }
       @Override
       public void delete() throws IOException {
-        QuotaUtil.deleteTableQuota(getConfiguration(), table);
+        QuotaUtil.deleteTableQuota(masterServices.getConnection(), table);
       }
       @Override
       public void preApply(final Quotas quotas) throws IOException {
@@ -256,15 +242,15 @@ public class MasterQuotaManager {
     setQuota(req, new SetQuotaOperations() {
       @Override
       public Quotas fetch() throws IOException {
-        return QuotaUtil.getNamespaceQuota(getConfiguration(), namespace);
+        return QuotaUtil.getNamespaceQuota(masterServices.getConnection(), namespace);
       }
       @Override
       public void update(final Quotas quotas) throws IOException {
-        QuotaUtil.addNamespaceQuota(getConfiguration(), namespace, quotas);
+        QuotaUtil.addNamespaceQuota(masterServices.getConnection(), namespace, quotas);
       }
       @Override
       public void delete() throws IOException {
-        QuotaUtil.deleteNamespaceQuota(getConfiguration(), namespace);
+        QuotaUtil.deleteNamespaceQuota(masterServices.getConnection(), namespace);
       }
       @Override
       public void preApply(final Quotas quotas) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
index c44a737..8cd402d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
@@ -23,19 +23,16 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -240,7 +237,7 @@ public class QuotaCache implements Stoppable {
         @Override
         public Map<String, QuotaState> fetchEntries(final List<Get> gets)
             throws IOException {
-          return QuotaUtil.fetchNamespaceQuotas(QuotaCache.this.getConfiguration(), gets);
+          return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets);
         }
       });
     }
@@ -255,7 +252,7 @@ public class QuotaCache implements Stoppable {
         @Override
         public Map<TableName, QuotaState> fetchEntries(final List<Get> gets)
             throws IOException {
-          return QuotaUtil.fetchTableQuotas(QuotaCache.this.getConfiguration(), gets);
+          return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets);
         }
       });
     }
@@ -272,7 +269,7 @@ public class QuotaCache implements Stoppable {
         @Override
         public Map<String, UserQuotaState> fetchEntries(final List<Get> gets)
             throws IOException {
-          return QuotaUtil.fetchUserQuotas(QuotaCache.this.getConfiguration(), gets);
+          return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets);
         }
       });
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index 5db30eb..bff648d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -19,15 +19,12 @@
 package org.apache.hadoop.hbase.quotas;
 
 import java.io.IOException;
-
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -35,18 +32,19 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * Helper class to interact with the quota table
@@ -85,90 +83,89 @@ public class QuotaUtil extends QuotaTableUtil {
   /* =========================================================================
    *  Quota "settings" helpers
    */
-  public static void addTableQuota(final Configuration conf, final TableName table,
+  public static void addTableQuota(final Connection connection, final TableName table,
       final Quotas data) throws IOException {
-    addQuotas(conf, getTableRowKey(table), data);
+    addQuotas(connection, getTableRowKey(table), data);
   }
 
-  public static void deleteTableQuota(final Configuration conf, final TableName table)
+  public static void deleteTableQuota(final Connection connection, final TableName table)
       throws IOException {
-    deleteQuotas(conf, getTableRowKey(table));
+    deleteQuotas(connection, getTableRowKey(table));
   }
 
-  public static void addNamespaceQuota(final Configuration conf, final String namespace,
+  public static void addNamespaceQuota(final Connection connection, final String namespace,
       final Quotas data) throws IOException {
-    addQuotas(conf, getNamespaceRowKey(namespace), data);
+    addQuotas(connection, getNamespaceRowKey(namespace), data);
   }
 
-  public static void deleteNamespaceQuota(final Configuration conf, final String namespace)
+  public static void deleteNamespaceQuota(final Connection connection, final String namespace)
       throws IOException {
-    deleteQuotas(conf, getNamespaceRowKey(namespace));
+    deleteQuotas(connection, getNamespaceRowKey(namespace));
   }
 
-  public static void addUserQuota(final Configuration conf, final String user,
+  public static void addUserQuota(final Connection connection, final String user,
       final Quotas data) throws IOException {
-    addQuotas(conf, getUserRowKey(user), data);
+    addQuotas(connection, getUserRowKey(user), data);
   }
 
-  public static void addUserQuota(final Configuration conf, final String user,
+  public static void addUserQuota(final Connection connection, final String user,
       final TableName table, final Quotas data) throws IOException {
-    addQuotas(conf, getUserRowKey(user),
-        getSettingsQualifierForUserTable(table), data);
+    addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table), data);
   }
 
-  public static void addUserQuota(final Configuration conf, final String user,
+  public static void addUserQuota(final Connection connection, final String user,
       final String namespace, final Quotas data) throws IOException {
-    addQuotas(conf, getUserRowKey(user),
+    addQuotas(connection, getUserRowKey(user),
         getSettingsQualifierForUserNamespace(namespace), data);
   }
 
-  public static void deleteUserQuota(final Configuration conf, final String user)
+  public static void deleteUserQuota(final Connection connection, final String user)
       throws IOException {
-    deleteQuotas(conf, getUserRowKey(user));
+    deleteQuotas(connection, getUserRowKey(user));
   }
 
-  public static void deleteUserQuota(final Configuration conf, final String user,
+  public static void deleteUserQuota(final Connection connection, final String user,
       final TableName table) throws IOException {
-    deleteQuotas(conf, getUserRowKey(user),
+    deleteQuotas(connection, getUserRowKey(user),
         getSettingsQualifierForUserTable(table));
   }
 
-  public static void deleteUserQuota(final Configuration conf, final String user,
+  public static void deleteUserQuota(final Connection connection, final String user,
       final String namespace) throws IOException {
-    deleteQuotas(conf, getUserRowKey(user),
+    deleteQuotas(connection, getUserRowKey(user),
         getSettingsQualifierForUserNamespace(namespace));
   }
 
-  private static void addQuotas(final Configuration conf, final byte[] rowKey,
+  private static void addQuotas(final Connection connection, final byte[] rowKey,
       final Quotas data) throws IOException {
-    addQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS, data);
+    addQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS, data);
   }
 
-  private static void addQuotas(final Configuration conf, final byte[] rowKey,
+  private static void addQuotas(final Connection connection, final byte[] rowKey,
       final byte[] qualifier, final Quotas data) throws IOException {
     Put put = new Put(rowKey);
     put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data));
-    doPut(conf, put);
+    doPut(connection, put);
   }
 
-  private static void deleteQuotas(final Configuration conf, final byte[] rowKey)
+  private static void deleteQuotas(final Connection connection, final byte[] rowKey)
       throws IOException {
-    deleteQuotas(conf, rowKey, null);
+    deleteQuotas(connection, rowKey, null);
   }
 
-  private static void deleteQuotas(final Configuration conf, final byte[] rowKey,
+  private static void deleteQuotas(final Connection connection, final byte[] rowKey,
       final byte[] qualifier) throws IOException {
     Delete delete = new Delete(rowKey);
     if (qualifier != null) {
       delete.deleteColumns(QUOTA_FAMILY_INFO, qualifier);
     }
-    doDelete(conf, delete);
+    doDelete(connection, delete);
   }
 
-  public static Map<String, UserQuotaState> fetchUserQuotas(final Configuration conf,
+  public static Map<String, UserQuotaState> fetchUserQuotas(final Connection connection,
       final List<Get> gets) throws IOException {
     long nowTs = EnvironmentEdgeManager.currentTime();
-    Result[] results = doGet(conf, gets);
+    Result[] results = doGet(connection, gets);
 
     Map<String, UserQuotaState> userQuotas = new HashMap<String, UserQuotaState>(results.length);
     for (int i = 0; i < results.length; ++i) {
@@ -207,9 +204,9 @@ public class QuotaUtil extends QuotaTableUtil {
     return userQuotas;
   }
 
-  public static Map<TableName, QuotaState> fetchTableQuotas(final Configuration conf,
+  public static Map<TableName, QuotaState> fetchTableQuotas(final Connection connection,
       final List<Get> gets) throws IOException {
-    return fetchGlobalQuotas("table", conf, gets, new KeyFromRow<TableName>() {
+    return fetchGlobalQuotas("table", connection, gets, new KeyFromRow<TableName>() {
       @Override
       public TableName getKeyFromRow(final byte[] row) {
         assert isTableRowKey(row);
@@ -218,9 +215,9 @@ public class QuotaUtil extends QuotaTableUtil {
     });
   }
 
-  public static Map<String, QuotaState> fetchNamespaceQuotas(final Configuration conf,
+  public static Map<String, QuotaState> fetchNamespaceQuotas(final Connection connection,
       final List<Get> gets) throws IOException {
-    return fetchGlobalQuotas("namespace", conf, gets, new KeyFromRow<String>() {
+    return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow<String>() {
       @Override
       public String getKeyFromRow(final byte[] row) {
         assert isNamespaceRowKey(row);
@@ -230,9 +227,10 @@ public class QuotaUtil extends QuotaTableUtil {
   }
 
   public static <K> Map<K, QuotaState> fetchGlobalQuotas(final String type,
-      final Configuration conf, final List<Get> gets, final KeyFromRow<K> kfr) throws IOException {
+      final Connection connection, final List<Get> gets, final KeyFromRow<K> kfr)
+  throws IOException {
     long nowTs = EnvironmentEdgeManager.currentTime();
-    Result[] results = doGet(conf, gets);
+    Result[] results = doGet(connection, gets);
 
     Map<K, QuotaState> globalQuotas = new HashMap<K, QuotaState>(results.length);
     for (int i = 0; i < results.length; ++i) {
@@ -266,23 +264,17 @@ public class QuotaUtil extends QuotaTableUtil {
   /* =========================================================================
    *  HTable helpers
    */
-  private static void doPut(final Configuration conf, final Put put)
-      throws IOException {
-    HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME);
-    try {
+  private static void doPut(final Connection connection, final Put put)
+  throws IOException {
+    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
       table.put(put);
-    } finally {
-      table.close();
     }
   }
 
-  private static void doDelete(final Configuration conf, final Delete delete)
-      throws IOException {
-    HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME);
-    try {
+  private static void doDelete(final Connection connection, final Delete delete)
+  throws IOException {
+    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
       table.delete(delete);
-    } finally {
-      table.close();
     }
   }
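
The QuotaUtil hunks show the signature half of the change: every helper now takes the caller's Connection rather than a Configuration, so no connection is built and torn down per operation. A sketch of a caller, assuming the connection is the master's shared one as in the MasterQuotaManager hunks above:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
    import org.apache.hadoop.hbase.quotas.QuotaUtil;

    class QuotaCallSketch {
      // 'connection' is long-lived and owned by the server; QuotaUtil only
      // borrows a Table from it for the duration of one put/delete.
      static void setUserQuota(Connection connection, String user, Quotas quotas)
          throws IOException {
        QuotaUtil.addUserQuota(connection, user, quotas);
      }
    }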
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 464ad7e..7169867 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -76,9 +76,8 @@ import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -207,12 +207,12 @@ public class HRegionServer extends HasThread implements
 
   protected HeapMemoryManager hMemManager;
 
-  /*
-   * Short-circuit (ie. bypassing RPC layer) HConnection to this Server
-   * to be used internally for miscellaneous needs. Initialized at the server startup
-   * and closed when server shuts down. Clients must never close it explicitly.
+  /**
+   * Cluster connection to be shared by services.
+   * Initialized at server startup and closed when server shuts down.
+   * Clients must never close it explicitly.
    */
-  protected HConnection shortCircuitConnection;
+  protected ClusterConnection clusterConnection;
 
   /*
    * Long-living meta table locator, which is created when the server is started and stopped
@@ -605,15 +605,20 @@ public class HRegionServer extends HasThread implements
   }
 
   /**
-   * Create wrapped short-circuit connection to this server.
-   * In its own method so can intercept and mock it over in tests.
+   * Create a 'smarter' HConnection, one that is capable of bypassing RPC if the request is to
+   * the local server. Safe to use whether the target is the local server or a remote one.
+   * Create this instance in its own method so it can be intercepted and mocked in tests.
    * @throws IOException
    */
-  protected HConnection createShortCircuitConnection() throws IOException {
+  @VisibleForTesting
+  protected ClusterConnection createClusterConnection() throws IOException {
+    // Create a cluster connection that, when the request is addressed to the local server, can
+    // short-circuit and bypass RPC to invoke it directly. Can be used for both local
+    // and remote invocations.
     return ConnectionUtils.createShortCircuitHConnection(
       ConnectionFactory.createConnection(conf), serverName, rpcServices, rpcServices);
   }
-  
+
   /**
    * Run test on configured codecs to make sure supporting libs are in place.
    * @param c
@@ -636,6 +641,17 @@ public class HRegionServer extends HasThread implements
   }
 
   /**
+   * Set up our cluster connection if it is not already initialized.
+   * @throws IOException
+   */
+  protected synchronized void setupClusterConnection() throws IOException {
+    if (clusterConnection == null) {
+      clusterConnection = createClusterConnection();
+      metaTableLocator = new MetaTableLocator();
+    }
+  }
+
+  /**
    * All initialization needed before we go register with Master.
    *
    * @throws IOException
@@ -643,12 +659,7 @@ public class HRegionServer extends HasThread implements
    */
   private void preRegistrationInitialization(){
     try {
-      synchronized (this) {
-        if (shortCircuitConnection == null) {
-          shortCircuitConnection = createShortCircuitConnection();
-          metaTableLocator = new MetaTableLocator();
-        }
-      }
+      setupClusterConnection();
 
       // Health checker thread.
       if (isHealthCheckerConfigured()) {
@@ -946,13 +957,13 @@ public class HRegionServer extends HasThread implements
 
     // so callers waiting for meta without timeout can stop
     if (this.metaTableLocator != null) this.metaTableLocator.stop();
-    if (this.shortCircuitConnection != null && !shortCircuitConnection.isClosed()) {
+    if (this.clusterConnection != null && !clusterConnection.isClosed()) {
       try {
-        this.shortCircuitConnection.close();
+        this.clusterConnection.close();
       } catch (IOException e) {
         // Although the {@link Closeable} interface throws an {@link
         // IOException}, in reality, the implementation would never do that.
-        LOG.error("Attempt to close server's short circuit HConnection failed.", e);
+        LOG.warn("Attempt to close server's short circuit HConnection failed.", e);
       }
     }
 
@@ -1737,8 +1748,8 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public HConnection getShortCircuitConnection() {
-    return this.shortCircuitConnection;
+  public ClusterConnection getConnection() {
+    return this.clusterConnection;
   }
 
   @Override
@@ -1829,7 +1840,7 @@ public class HRegionServer extends HasThread implements
           }
         } else {
           try {
-            MetaTableAccessor.updateRegionLocation(shortCircuitConnection,
+            MetaTableAccessor.updateRegionLocation(clusterConnection,
               hris[0], serverName, openSeqNum);
           } catch (IOException e) {
             LOG.info("Failed to update meta", e);
@@ -3047,7 +3058,7 @@ public class HRegionServer extends HasThread implements
     }
     return result;
   }
-  
+
   public CoprocessorServiceResponse execRegionServerService(final RpcController controller,
       final CoprocessorServiceRequest serviceRequest) throws ServiceException {
     try {
@@ -3094,7 +3105,7 @@ public class HRegionServer extends HasThread implements
       throw new ServiceException(ie);
     }
   }
-  
+
   /**
    * @return The cache config instance used by the regionserver.
    */
@@ -3108,7 +3119,7 @@ public class HRegionServer extends HasThread implements
   protected ConfigurationManager getConfigurationManager() {
     return configurationManager;
   }
-    
+
   /**
    * Reload the configuration from disk.
    */
@@ -3116,6 +3127,6 @@ public class HRegionServer extends HasThread implements
     LOG.info("Reloading the configuration from disk.");
     // Reload the configuration from disk.
     conf.reloadConfiguration();
-    configurationManager.notifyAllObservers(conf);  
+    configurationManager.notifyAllObservers(conf);
   }
 }
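
createClusterConnection() above is the one place the short-circuit wrapping happens. Reduced to a sketch (mirrors the method body in the hunk; RSRpcServices implements both the admin and client protobuf stubs, hence rpcServices is passed twice):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.ConnectionUtils;
    import org.apache.hadoop.hbase.regionserver.RSRpcServices;

    class ShortCircuitSketch {
      // Requests addressed to serverName skip the RPC layer and call the local
      // stubs directly; anything else goes over the wire as usual.
      static ClusterConnection create(Configuration conf, ServerName serverName,
          RSRpcServices rpcServices) throws IOException {
        return ConnectionUtils.createShortCircuitHConnection(
            ConnectionFactory.createConnection(conf), serverName, rpcServices, rpcServices);
      }
    }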

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
index 377d548..d478bfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
@@ -658,7 +658,7 @@ public class RegionMergeTransaction {
     // Get merge regions if it is a merged region and already has merge
     // qualifier
     Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
-        .getRegionsFromMergeQualifier(services.getShortCircuitConnection(), regionName);
+        .getRegionsFromMergeQualifier(services.getConnection(), regionName);
     if (mergeRegions != null &&
         (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
       // It has merge qualifier

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 0408231..997692f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -24,12 +24,12 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -150,11 +150,6 @@ public class ReplicationSyncUp extends Configured implements Tool {
     }
 
     @Override
-    public HConnection getShortCircuitConnection() {
-      return null;
-    }
-
-    @Override
     public MetaTableLocator getMetaTableLocator() {
       return null;
     }
@@ -181,5 +176,10 @@ public class ReplicationSyncUp extends Configured implements Tool {
     public boolean isStopped() {
       return false;
     }
+
+    @Override
+    public ClusterConnection getConnection() {
+      return null;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 5ca1164..4af28b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -33,7 +33,6 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -44,9 +43,11 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -170,12 +171,11 @@ public class AccessControlLists {
           Bytes.toString(key)+": "+Bytes.toStringBinary(value)
       );
     }
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      acls.put(p);
-    } finally {
-      if (acls != null) acls.close();
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        table.put(p);
+      }
     }
   }
 
@@ -200,13 +200,12 @@ public class AccessControlLists {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing permission "+ userPerm.toString());
     }
-    d.deleteColumns(ACL_LIST_FAMILY, key);
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      acls.delete(d);
-    } finally {
-      if (acls != null) acls.close();
+    d.addColumns(ACL_LIST_FAMILY, key);
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        table.delete(d);
+      }
     }
   }
 
@@ -220,13 +219,11 @@ public class AccessControlLists {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing permissions of removed table "+ tableName);
     }
-
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      acls.delete(d);
-    } finally {
-      if (acls != null) acls.close();
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        table.delete(d);
+      }
     }
   }
 
@@ -241,12 +238,10 @@ public class AccessControlLists {
       LOG.debug("Removing permissions of removed namespace "+ namespace);
     }
 
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      acls.delete(d);
-    } finally {
-      if (acls != null) acls.close();
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        table.delete(d);
+      }
     }
   }
 
@@ -260,41 +255,38 @@ public class AccessControlLists {
       LOG.debug("Removing permissions of removed column " + Bytes.toString(column) +
                 " from table "+ tableName);
     }
-
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-
-      Scan scan = new Scan();
-      scan.addFamily(ACL_LIST_FAMILY);
-
-      String columnName = Bytes.toString(column);
-      scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(
-                     String.format("(%s%s%s)|(%s%s)$",
-                     ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER,
-                     ACL_KEY_DELIMITER, columnName))));
-
-      Set<byte[]> qualifierSet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-      ResultScanner scanner = acls.getScanner(scan);
-      try {
-        for (Result res : scanner) {
-          for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) {
-            qualifierSet.add(q);
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        Scan scan = new Scan();
+        scan.addFamily(ACL_LIST_FAMILY);
+
+        String columnName = Bytes.toString(column);
+        scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(
+            String.format("(%s%s%s)|(%s%s)$",
+                ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER,
+                ACL_KEY_DELIMITER, columnName))));
+
+        Set<byte[]> qualifierSet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+        ResultScanner scanner = table.getScanner(scan);
+        try {
+          for (Result res : scanner) {
+            for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) {
+              qualifierSet.add(q);
+            }
           }
+        } finally {
+          scanner.close();
         }
-      } finally {
-        scanner.close();
-      }
 
-      if (qualifierSet.size() > 0) {
-        Delete d = new Delete(tableName.getName());
-        for (byte[] qualifier : qualifierSet) {
-          d.deleteColumns(ACL_LIST_FAMILY, qualifier);
+        if (qualifierSet.size() > 0) {
+          Delete d = new Delete(tableName.getName());
+          for (byte[] qualifier : qualifierSet) {
+            d.addColumns(ACL_LIST_FAMILY, qualifier);
+          }
+          table.delete(d);
         }
-        acls.delete(d);
       }
-    } finally {
-      if (acls != null) acls.close();
     }
   }
 
@@ -422,19 +414,20 @@ public class AccessControlLists {
     Scan scan = new Scan();
     scan.addFamily(ACL_LIST_FAMILY);
 
-    Table acls = null;
     ResultScanner scanner = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      scanner = acls.getScanner(scan);
-      for (Result row : scanner) {
-        ListMultimap<String,TablePermission> resultPerms =
-            parsePermissions(row.getRow(), row);
-        allPerms.put(row.getRow(), resultPerms);
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        scanner = table.getScanner(scan);
+        try {
+          for (Result row : scanner) {
+            ListMultimap<String,TablePermission> resultPerms = parsePermissions(row.getRow(), row);
+            allPerms.put(row.getRow(), resultPerms);
+          }
+        } finally {
+          if (scanner != null) scanner.close();
+        }
       }
-    } finally {
-      if (scanner != null) scanner.close();
-      if (acls != null) acls.close();
     }
 
     return allPerms;
@@ -465,20 +458,19 @@ public class AccessControlLists {
 
     // for normal user tables, we just read the table row from _acl_
     ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      Get get = new Get(entryName);
-      get.addFamily(ACL_LIST_FAMILY);
-      Result row = acls.get(get);
-      if (!row.isEmpty()) {
-        perms = parsePermissions(entryName, row);
-      } else {
-        LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
-            + Bytes.toString(entryName));
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        Get get = new Get(entryName);
+        get.addFamily(ACL_LIST_FAMILY);
+        Result row = table.get(get);
+        if (!row.isEmpty()) {
+          perms = parsePermissions(entryName, row);
+        } else {
+          LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
+              + Bytes.toString(entryName));
+        }
       }
-    } finally {
-      if (acls != null) acls.close();
     }
 
     return perms;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 1c6a341..1218368 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -1087,7 +1087,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {
     if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices()
-      .getShortCircuitConnection(), AccessControlLists.ACL_TABLE_NAME)) {
+      .getConnection(), AccessControlLists.ACL_TABLE_NAME)) {
       // initialize the ACL storage table
       AccessControlLists.createACLTable(ctx.getEnvironment().getMasterServices());
     } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
index 3a37049..e3c4f53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
@@ -23,13 +23,15 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
 
 import com.google.protobuf.ServiceException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -45,6 +47,7 @@ import org.apache.hadoop.security.token.Token;
  */
 @InterfaceAudience.Private
 public class TokenUtil {
+  // This class is referenced indirectly by the User class over in hbase-common; instances are created by reflection.
   private static Log LOG = LogFactory.getLog(TokenUtil.class);
 
   /**
@@ -54,21 +57,19 @@ public class TokenUtil {
    */
   public static Token<AuthenticationTokenIdentifier> obtainToken(
       Configuration conf) throws IOException {
-    Table meta = null;
-    try {
-      meta = new HTable(conf, TableName.META_TABLE_NAME);
-      CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
-      AuthenticationProtos.AuthenticationService.BlockingInterface service =
-          AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
-      AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
-          AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
+    // TODO: Pass in a Connection to use. Will this even work?
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
+        CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
+        AuthenticationProtos.AuthenticationService.BlockingInterface service =
+            AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
+        AuthenticationProtos.GetAuthenticationTokenResponse response =
+          service.getAuthenticationToken(null,
+            AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
 
-      return ProtobufUtil.toToken(response.getToken());
-    } catch (ServiceException se) {
-      ProtobufUtil.toIOException(se);
-    } finally {
-      if (meta != null) {
-        meta.close();
+        return ProtobufUtil.toToken(response.getToken());
+      } catch (ServiceException se) {
+        ProtobufUtil.toIOException(se);
       }
     }
     // dummy return for ServiceException catch block
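
A hypothetical caller of obtainToken, handing the result to the current
Hadoop user; the UserGroupInformation calls are stock Hadoop API, not
part of this patch, and the cluster is assumed to have the token
provider coprocessor loaded:

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
  import org.apache.hadoop.hbase.security.token.TokenUtil;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;

  public class ObtainTokenExample {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      // Ask the cluster for a delegation token, then attach it to the
      // current user so subsequent HBase calls can authenticate with it.
      Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conf);
      UserGroupInformation.getCurrentUser().addToken(token);
    }
  }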

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index aaad8ba..c76f562 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -168,7 +168,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
   public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
     // Need to create the new system table for labels here
     MasterServices master = ctx.getEnvironment().getMasterServices();
-    if (!MetaTableAccessor.tableExists(master.getShortCircuitConnection(), LABELS_TABLE_NAME)) {
+    if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
       HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
       HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
       labelsColumn.setBloomFilterType(BloomType.NONE);
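
The descriptor-then-create steps around this hunk, sketched standalone;
the table and family names below are made up for illustration:

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateLabelsLikeTable {
    public static void main(String[] args) throws IOException {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_labels"));
      HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("f"));
      family.setBloomFilterType(BloomType.NONE); // mirror the labels table: no blooms
      desc.addFamily(family);
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        admin.createTable(desc);
      }
    }
  }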

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6b43007/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 539ba70..23673b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.tool;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -39,15 +40,17 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -228,51 +231,59 @@ public final class Canary implements Tool {
       }
     }
 
-    // start to prepare the stuffs
+    // Prepare the monitor.
     Monitor monitor = null;
     Thread monitorThread = null;
     long startTime = 0;
     long currentTimeLength = 0;
+    // Get a connection to use below.
+    // try-with-resources jdk7 construct. See
+    // http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html
+    try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+      do {
+        // Run the monitor.
+        try {
+          monitor = this.newMonitor(connection, index, args);
+          monitorThread = new Thread(monitor);
+          startTime = System.currentTimeMillis();
+          monitorThread.start();
+          while (!monitor.isDone()) {
+            // wait for 1 sec
+            Thread.sleep(1000);
+            // exit if any error occurs
+            if (this.failOnError && monitor.hasError()) {
+              monitorThread.interrupt();
+              if (monitor.initialized) {
+                System.exit(monitor.errorCode);
+              } else {
+                System.exit(INIT_ERROR_EXIT_CODE);
+              }
+            }
+            currentTimeLength = System.currentTimeMillis() - startTime;
+            if (currentTimeLength > this.timeout) {
+              LOG.error("The monitor has run for " + currentTimeLength
+                  + "ms, exceeding the timeout limit of " + this.timeout
+                  + "ms; killing it");
+              if (monitor.initialized) {
+                System.exit(TIMEOUT_ERROR_EXIT_CODE);
+              } else {
+                System.exit(INIT_ERROR_EXIT_CODE);
+              }
+              break;
+            }
+          }
 
-    do {
-      // do monitor !!
-      monitor = this.newMonitor(index, args);
-      monitorThread = new Thread(monitor);
-      startTime = System.currentTimeMillis();
-      monitorThread.start();
-      while (!monitor.isDone()) {
-        // wait for 1 sec
-        Thread.sleep(1000);
-        // exit if any error occurs
-        if (this.failOnError && monitor.hasError()) {
-          monitorThread.interrupt();
-          if (monitor.initialized) {
+          if (this.failOnError && monitor.hasError()) {
+            monitorThread.interrupt();
             System.exit(monitor.errorCode);
-          } else {
-            System.exit(INIT_ERROR_EXIT_CODE);
-          }
-        }
-        currentTimeLength = System.currentTimeMillis() - startTime;
-        if (currentTimeLength > this.timeout) {
-          LOG.error("The monitor is running too long (" + currentTimeLength
-              + ") after timeout limit:" + this.timeout
-              + " will be killed itself !!");
-          if (monitor.initialized) {
-            System.exit(TIMEOUT_ERROR_EXIT_CODE);
-          } else {
-            System.exit(INIT_ERROR_EXIT_CODE);
           }
-          break;
+        } finally {
+          if (monitor != null) monitor.close();
         }
-      }
-
-      if (this.failOnError && monitor.hasError()) {
-        monitorThread.interrupt();
-        System.exit(monitor.errorCode);
-      }
 
-      Thread.sleep(interval);
-    } while (interval > 0);
+        Thread.sleep(interval);
+      } while (interval > 0);
+    } // try-with-resources close
 
     return(monitor.errorCode);
   }
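
The Runnable-plus-Closeable shape Monitor takes on below, reduced to a
sketch; the names are illustrative, and the ownership rule matches the
patch: the monitor closes the Admin it opened, never the caller's
Connection:

  import java.io.Closeable;
  import java.io.IOException;

  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;

  class ExampleMonitor implements Runnable, Closeable {
    private final Connection connection; // shared; owned by the caller
    private Admin admin;                 // owned by this monitor

    ExampleMonitor(Connection connection) {
      this.connection = connection;
    }

    @Override
    public void run() {
      try {
        this.admin = this.connection.getAdmin();
        // ... monitoring work goes here ...
      } catch (IOException e) {
        // record the failure for the caller to inspect
      }
    }

    @Override
    public void close() throws IOException {
      if (this.admin != null) this.admin.close(); // leave the connection open
    }
  }
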
@@ -296,13 +307,14 @@
   }
 
   /**
-   * a Factory method for {@link Monitor}.
-   * Can be overrided by user.
+   * A factory method for {@link Monitor}.
+   * Can be overridden by the user.
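+   * @param connection the shared cluster Connection; the caller owns and closes it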
    * @param index a start index for monitor target
    * @param args args passed from user
    * @return a Monitor instance
    */
-  public Monitor newMonitor(int index, String[] args) {
+  public Monitor newMonitor(final Connection connection, int index, String[] args) {
     Monitor monitor = null;
     String[] monitorTargets = null;
 
@@ -314,20 +325,20 @@ public final class Canary implements Tool {
 
     if(this.regionServerMode) {
       monitor = new RegionServerMonitor(
-          this.conf,
+          connection,
           monitorTargets,
           this.useRegExp,
           (ExtendedSink)this.sink);
     } else {
-      monitor = new RegionMonitor(this.conf, monitorTargets, this.useRegExp, this.sink);
+      monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink);
     }
     return monitor;
   }
 
   // a Monitor super-class can be extended by users
-  public static abstract class Monitor implements Runnable {
+  public static abstract class Monitor implements Runnable, Closeable {
 
-    protected Configuration config;
+    protected Connection connection;
     protected Admin admin;
     protected String[] targets;
     protected boolean useRegExp;
@@ -345,12 +356,16 @@ public final class Canary implements Tool {
       return errorCode != 0;
     }
 
-    protected Monitor(Configuration config, String[] monitorTargets,
+    @Override
+    public void close() throws IOException {
+      if (this.admin != null) this.admin.close();
+    }
+
+    protected Monitor(Connection connection, String[] monitorTargets,
         boolean useRegExp, Sink sink) {
-      if (null == config)
-        throw new IllegalArgumentException("config shall not be null");
+      if (null == connection) throw new IllegalArgumentException("connection shall not be null");
 
-      this.config = config;
+      this.connection = connection;
       this.targets = monitorTargets;
       this.useRegExp = useRegExp;
       this.sink = sink;
@@ -361,7 +376,7 @@ public final class Canary implements Tool {
     protected boolean initAdmin() {
       if (null == this.admin) {
         try {
-          this.admin = new HBaseAdmin(config);
+          this.admin = this.connection.getAdmin();
         } catch (Exception e) {
           LOG.error("Initial HBaseAdmin failed...", e);
           this.errorCode = INIT_ERROR_EXIT_CODE;
@@ -377,9 +392,9 @@ public final class Canary implements Tool {
   // a monitor for region mode
   private static class RegionMonitor extends Monitor {
 
-    public RegionMonitor(Configuration config, String[] monitorTargets,
+    public RegionMonitor(Connection connection, String[] monitorTargets,
         boolean useRegExp, Sink sink) {
-      super(config, monitorTargets, useRegExp, sink);
+      super(connection, monitorTargets, useRegExp, sink);
     }
 
     @Override
@@ -481,7 +496,7 @@ public final class Canary implements Tool {
     Table table = null;
 
     try {
-      table = new HTable(admin.getConfiguration(), tableDesc.getTableName());
+      table = admin.getConnection().getTable(tableDesc.getTableName());
     } catch (TableNotFoundException e) {
       return;
     }
@@ -556,9 +571,9 @@ public final class Canary implements Tool {
   //a monitor for regionserver mode
   private static class RegionServerMonitor extends Monitor {
 
-    public RegionServerMonitor(Configuration config, String[] monitorTargets,
+    public RegionServerMonitor(Connection connection, String[] monitorTargets,
         boolean useRegExp, ExtendedSink sink) {
-      super(config, monitorTargets, useRegExp, sink);
+      super(connection, monitorTargets, useRegExp, sink);
     }
 
     private ExtendedSink getSink() {
@@ -622,7 +637,7 @@ public final class Canary implements Tool {
         region = entry.getValue().get(0);
         try {
           tableName = region.getTable();
-          table = new HTable(this.admin.getConfiguration(), tableName);
+          table = admin.getConnection().getTable(tableName);
           startKey = region.getStartKey();
           // Can't do a get on empty start row so do a Scan of first element if any instead.
           if(startKey.length > 0) {
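
The hunk below swaps HTable.getRegionLocations() for the RegionLocator
interface; a minimal standalone fragment of that pattern, with a
hypothetical table name (imports match the ones added at the top of
this file's diff, plus HBaseConfiguration):

  Configuration conf = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       RegionLocator locator =
           connection.getRegionLocator(TableName.valueOf("example"))) {
    // RegionLocator is Closeable too, so it rides along in the same try.
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      System.out.println(location.getRegionInfo().getEncodedName()
          + " is on " + location.getServerName());
    }
  }
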
@@ -675,18 +690,19 @@ public final class Canary implements Tool {
 
     private Map<String, List<HRegionInfo>> getAllRegionServerByName() {
       Map<String, List<HRegionInfo>> rsAndRMap = new HashMap<String, List<HRegionInfo>>();
-      HTable table = null;
+      Table table = null;
+      RegionLocator regionLocator = null;
       try {
         HTableDescriptor[] tableDescs = this.admin.listTables();
         List<HRegionInfo> regions = null;
         for (HTableDescriptor tableDesc : tableDescs) {
-          table = new HTable(this.admin.getConfiguration(), tableDesc.getTableName());
+          table = this.admin.getConnection().getTable(tableDesc.getTableName());
+          regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName());
 
-          for (Map.Entry<HRegionInfo, ServerName> entry : table
-              .getRegionLocations().entrySet()) {
-            ServerName rs = entry.getValue();
+          for (HRegionLocation location: regionLocator.getAllRegionLocations()) {
+            ServerName rs = location.getServerName();
             String rsName = rs.getHostname();
-            HRegionInfo r = entry.getKey();
+            HRegionInfo r = location.getRegionInfo();
 
             if (rsAndRMap.containsKey(rsName)) {
               regions = rsAndRMap.get(rsName);