Posted to commits@hbase.apache.org by st...@apache.org on 2014/07/03 04:03:10 UTC

[5/6] HBASE-4495 CatalogTracker has an identity crisis; needs to be cut-back in scope (Mikhail Antonov)
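
The change across these hunks, as a minimal hedged sketch (the class and field below are illustrative; only the MetaTableAccessor.tableExists(connection, tableName) call is taken from the patch): catalog reads that used to create, start and stop a CatalogTracker per call now go straight through the admin's long-lived connection.

import java.io.IOException;

import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;

// Illustrative only: old vs. new call shape for a catalog read.
class CatalogReadSketch {
  private final ClusterConnection connection; // assumed to be handed in by the caller

  CatalogReadSketch(ClusterConnection connection) {
    this.connection = connection;
  }

  boolean tableExists(TableName tableName) throws IOException {
    // Before: CatalogTracker ct = getCatalogTracker();
    //         try { return MetaReader.tableExists(ct, tableName); }
    //         finally { cleanupCatalogTracker(ct); }
    // After: a single call against the existing connection, no tracker lifecycle.
    return MetaTableAccessor.tableExists(connection, tableName);
  }
}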

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ed50c05..10e4d04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -38,6 +38,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionException;
@@ -59,8 +61,6 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -142,6 +142,7 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;
@@ -163,6 +164,8 @@ import com.google.protobuf.ServiceException;
 public class HBaseAdmin implements Admin {
   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
 
+  private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+
   // We use the implementation class rather then the interface because we
   //  need the package protected functions to get the connection to master
   private ClusterConnection connection;
@@ -232,30 +235,6 @@ public class HBaseAdmin implements Admin {
     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
   }
 
-  /**
-   * @return A new CatalogTracker instance; call {@link #cleanupCatalogTracker(CatalogTracker)}
-   * to cleanup the returned catalog tracker.
-   * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
-   * @throws IOException
-   * @see #cleanupCatalogTracker(CatalogTracker)
-   */
-  private synchronized CatalogTracker getCatalogTracker()
-  throws ZooKeeperConnectionException, IOException {
-    CatalogTracker ct = null;
-    try {
-      ct = new CatalogTracker(this.conf);
-      ct.start();
-    } catch (InterruptedException e) {
-      // Let it out as an IOE for now until we redo all so tolerate IEs
-      throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
-    }
-    return ct;
-  }
-
-  private void cleanupCatalogTracker(final CatalogTracker ct) {
-    ct.stop();
-  }
-
   @Override
   public void abort(String why, Throwable e) {
     // Currently does nothing but throw the passed message and exception
@@ -290,17 +269,8 @@ public class HBaseAdmin implements Admin {
    * @return True if table exists already.
    * @throws IOException
    */
-  @Override
-  public boolean tableExists(final TableName tableName)
-  throws IOException {
-    boolean b = false;
-    CatalogTracker ct = getCatalogTracker();
-    try {
-      b = MetaReader.tableExists(ct, tableName);
-    } finally {
-      cleanupCatalogTracker(ct);
-    }
-    return b;
+  public boolean tableExists(final TableName tableName) throws IOException {
+    return MetaTableAccessor.tableExists(connection, tableName);
   }
 
   public boolean tableExists(final byte[] tableName)
@@ -534,7 +504,7 @@ public class HBaseAdmin implements Admin {
         MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
           @Override
           public boolean processRow(Result rowResult) throws IOException {
-            RegionLocations list = MetaReader.getRegionLocations(rowResult);
+            RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
             if (list == null) {
               LOG.warn("No serialized HRegionInfo in " + rowResult);
               return true;
@@ -680,7 +650,7 @@ public class HBaseAdmin implements Admin {
     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
       try {
         HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
-        Scan scan = MetaReader.getScanForTableName(tableName);
+        Scan scan = MetaTableAccessor.getScanForTableName(tableName);
         scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
         ScanRequest request = RequestConverter.buildScanRequest(
           firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
@@ -1405,28 +1375,23 @@ public class HBaseAdmin implements Admin {
    */
   @Override
   public void closeRegion(final byte [] regionname, final String serverName)
-  throws IOException {
-    CatalogTracker ct = getCatalogTracker();
-    try {
-      if (serverName != null) {
-        Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, regionname);
-        if (pair == null || pair.getFirst() == null) {
-          throw new UnknownRegionException(Bytes.toStringBinary(regionname));
-        } else {
-          closeRegion(ServerName.valueOf(serverName), pair.getFirst());
-        }
+      throws IOException {
+    if (serverName != null) {
+      Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
+      if (pair == null || pair.getFirst() == null) {
+        throw new UnknownRegionException(Bytes.toStringBinary(regionname));
       } else {
-        Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, regionname);
-        if (pair == null) {
-          throw new UnknownRegionException(Bytes.toStringBinary(regionname));
-        } else if (pair.getSecond() == null) {
-          throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
-        } else {
-          closeRegion(pair.getSecond(), pair.getFirst());
-        }
+        closeRegion(ServerName.valueOf(serverName), pair.getFirst());
+      }
+    } else {
+      Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
+      if (pair == null) {
+        throw new UnknownRegionException(Bytes.toStringBinary(regionname));
+      } else if (pair.getSecond() == null) {
+        throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
+      } else {
+        closeRegion(pair.getSecond(), pair.getFirst());
       }
-    } finally {
-      cleanupCatalogTracker(ct);
     }
   }
 
@@ -1525,28 +1490,23 @@ public class HBaseAdmin implements Admin {
   @Override
   public void flush(final byte[] tableNameOrRegionName)
   throws IOException, InterruptedException {
-    CatalogTracker ct = getCatalogTracker();
-    try {
-      Pair<HRegionInfo, ServerName> regionServerPair
-        = getRegion(tableNameOrRegionName, ct);
-      if (regionServerPair != null) {
-        if (regionServerPair.getSecond() == null) {
-          throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
-        } else {
-          flush(regionServerPair.getSecond(), regionServerPair.getFirst());
-        }
+    Pair<HRegionInfo, ServerName> regionServerPair
+      = getRegion(tableNameOrRegionName);
+    if (regionServerPair != null) {
+      if (regionServerPair.getSecond() == null) {
+        throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
       } else {
-        final TableName tableName = checkTableExists(
-            TableName.valueOf(tableNameOrRegionName), ct);
-        if (isTableDisabled(tableName)) {
-          LOG.info("Table is disabled: " + tableName.getNameAsString());
-          return;
-        }
-        execProcedure("flush-table-proc", tableName.getNameAsString(),
-          new HashMap<String, String>());
+        flush(regionServerPair.getSecond(), regionServerPair.getFirst());
       }
-    } finally {
-      cleanupCatalogTracker(ct);
+    } else {
+      final TableName tableName = checkTableExists(
+          TableName.valueOf(tableNameOrRegionName));
+      if (isTableDisabled(tableName)) {
+        LOG.info("Table is disabled: " + tableName.getNameAsString());
+        return;
+      }
+      execProcedure("flush-table-proc", tableName.getNameAsString(),
+        new HashMap<String, String>());
     }
   }
 
@@ -1692,10 +1652,10 @@ public class HBaseAdmin implements Admin {
   private void compact(final byte[] tableNameOrRegionName,
     final byte[] columnFamily,final boolean major)
   throws IOException, InterruptedException {
-    CatalogTracker ct = getCatalogTracker();
+    ZooKeeperWatcher zookeeper = null;
     try {
       Pair<HRegionInfo, ServerName> regionServerPair
-        = getRegion(tableNameOrRegionName, ct);
+        = getRegion(tableNameOrRegionName);
       if (regionServerPair != null) {
         if (regionServerPair.getSecond() == null) {
           throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -1704,10 +1664,12 @@ public class HBaseAdmin implements Admin {
         }
       } else {
         final TableName tableName =
-            checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+            checkTableExists(TableName.valueOf(tableNameOrRegionName));
+        zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
+            new ThrowableAbortable());
         List<Pair<HRegionInfo, ServerName>> pairs =
-          MetaReader.getTableRegionsAndLocations(ct,
-              tableName);
+          MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
+            tableName);
         for (Pair<HRegionInfo, ServerName> pair: pairs) {
           if (pair.getFirst().isOffline()) continue;
           if (pair.getSecond() == null) continue;
@@ -1723,7 +1685,9 @@ public class HBaseAdmin implements Admin {
         }
       }
     } finally {
-      cleanupCatalogTracker(ct);
+      if (zookeeper != null) {
+        zookeeper.close();
+      }
     }
   }
 
@@ -2034,10 +1998,10 @@ public class HBaseAdmin implements Admin {
   @Override
   public void split(final byte[] tableNameOrRegionName,
       final byte [] splitPoint) throws IOException, InterruptedException {
-    CatalogTracker ct = getCatalogTracker();
+    ZooKeeperWatcher zookeeper = null;
     try {
       Pair<HRegionInfo, ServerName> regionServerPair
-        = getRegion(tableNameOrRegionName, ct);
+        = getRegion(tableNameOrRegionName);
       if (regionServerPair != null) {
         if (regionServerPair.getSecond() == null) {
             throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -2046,10 +2010,12 @@ public class HBaseAdmin implements Admin {
         }
       } else {
         final TableName tableName =
-            checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+            checkTableExists(TableName.valueOf(tableNameOrRegionName));
+        zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
+            new ThrowableAbortable());
         List<Pair<HRegionInfo, ServerName>> pairs =
-          MetaReader.getTableRegionsAndLocations(ct,
-              tableName);
+          MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
+            tableName);
         for (Pair<HRegionInfo, ServerName> pair: pairs) {
           // May not be a server for a particular row
           if (pair.getSecond() == null) continue;
@@ -2063,7 +2029,9 @@ public class HBaseAdmin implements Admin {
         }
       }
     } finally {
-      cleanupCatalogTracker(ct);
+      if (zookeeper != null) {
+        zookeeper.close();
+      }
     }
   }
 
@@ -2116,19 +2084,19 @@ public class HBaseAdmin implements Admin {
 
   /**
    * @param tableNameOrRegionName Name of a table or name of a region.
-   * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
    * @return a pair of HRegionInfo and ServerName if <code>tableNameOrRegionName</code> is
-   *  a verified region name (we call {@link  MetaReader#getRegion( CatalogTracker, byte[])}
+   *  a verified region name (we call {@link
+   *  MetaTableAccessor#getRegion(HConnection, byte[])}
    *  else null.
    * Throw an exception if <code>tableNameOrRegionName</code> is null.
    * @throws IOException
    */
-  Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName,
-      final CatalogTracker ct) throws IOException {
+  Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName) throws IOException {
     if (tableNameOrRegionName == null) {
       throw new IllegalArgumentException("Pass a table name or region name");
     }
-    Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, tableNameOrRegionName);
+    Pair<HRegionInfo, ServerName> pair =
+      MetaTableAccessor.getRegion(connection, tableNameOrRegionName);
     if (pair == null) {
       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
@@ -2170,16 +2138,10 @@ public class HBaseAdmin implements Admin {
             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
     }
-    CatalogTracker ct = getCatalogTracker();
     byte[] tmp = regionNameOrEncodedRegionName;
-    try {
-      Pair<HRegionInfo, ServerName> regionServerPair
-        = getRegion(regionNameOrEncodedRegionName, ct);
-      if (regionServerPair != null && regionServerPair.getFirst() != null) {
-        tmp = regionServerPair.getFirst().getRegionName();
-      }
-    } finally {
-      cleanupCatalogTracker(ct);
+    Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
+    if (regionServerPair != null && regionServerPair.getFirst() != null) {
+      tmp = regionServerPair.getFirst().getRegionName();
     }
     return tmp;
   }
@@ -2187,16 +2149,13 @@ public class HBaseAdmin implements Admin {
   /**
    * Check if table exists or not
    * @param tableName Name of a table.
-   * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
    * @return tableName instance
    * @throws IOException if a remote or network exception occurs.
    * @throws TableNotFoundException if table does not exist.
    */
-  //TODO rename this method
-  private TableName checkTableExists(
-      final TableName tableName, CatalogTracker ct)
+  private TableName checkTableExists(final TableName tableName)
       throws IOException {
-    if (!MetaReader.tableExists(ct, tableName)) {
+    if (!MetaTableAccessor.tableExists(connection, tableName)) {
       throw new TableNotFoundException(tableName);
     }
     return tableName;
@@ -2492,12 +2451,14 @@ public class HBaseAdmin implements Admin {
   @Override
   public List<HRegionInfo> getTableRegions(final TableName tableName)
   throws IOException {
-    CatalogTracker ct = getCatalogTracker();
+    ZooKeeperWatcher zookeeper =
+      new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
+        new ThrowableAbortable());
     List<HRegionInfo> Regions = null;
     try {
-      Regions = MetaReader.getTableRegions(ct, tableName, true);
+      Regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
     } finally {
-      cleanupCatalogTracker(ct);
+      zookeeper.close();
     }
     return Regions;
   }
@@ -2614,10 +2575,11 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
       throws IOException, InterruptedException {
     CompactionState state = CompactionState.NONE;
-    CatalogTracker ct = getCatalogTracker();
+    ZooKeeperWatcher zookeeper =
+      new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
+        new ThrowableAbortable());
     try {
-      Pair<HRegionInfo, ServerName> regionServerPair
-        = getRegion(tableNameOrRegionName, ct);
+      Pair<HRegionInfo, ServerName> regionServerPair = getRegion(tableNameOrRegionName);
       if (regionServerPair != null) {
         if (regionServerPair.getSecond() == null) {
           throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -2631,9 +2593,9 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
         }
       } else {
         final TableName tableName =
-            checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+            checkTableExists(TableName.valueOf(tableNameOrRegionName));
         List<Pair<HRegionInfo, ServerName>> pairs =
-          MetaReader.getTableRegionsAndLocations(ct, tableName);
+          MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
         for (Pair<HRegionInfo, ServerName> pair: pairs) {
           if (pair.getFirst().isOffline()) continue;
           if (pair.getSecond() == null) continue;
@@ -2682,7 +2644,7 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
     } catch (ServiceException se) {
       throw ProtobufUtil.getRemoteException(se);
     } finally {
-      cleanupCatalogTracker(ct);
+      zookeeper.close();
     }
     return state;
   }
@@ -3546,4 +3508,20 @@ public synchronized  byte[][] rollHLogWriter(String serverName)
   public CoprocessorRpcChannel coprocessorService() {
     return new MasterCoprocessorRpcChannel(connection);
   }
+
+  /**
+   * Simple {@link Abortable}, throwing RuntimeException on abort.
+   */
+  private static class ThrowableAbortable implements Abortable {
+
+    @Override
+    public void abort(String why, Throwable e) {
+      throw new RuntimeException(why, e);
+    }
+
+    @Override
+    public boolean isAborted() {
+      return true;
+    }
+  }
 }
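
A hedged sketch of the shape the compact(), split(), getTableRegions() and getCompactionState() paths above now follow: a short-lived ZooKeeperWatcher, identified by ZK_IDENTIFIER_PREFIX plus the connection, is opened with an Abortable that simply rethrows, MetaTableAccessor is queried through it and the existing connection, and the watcher is closed in a finally block. Names outside the calls visible in the hunks are illustrative.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Illustrative helper mirroring the watcher-per-operation pattern in the patch.
class RegionListingSketch {
  static List<Pair<HRegionInfo, ServerName>> listRegions(Configuration conf,
      ClusterConnection connection, TableName tableName)
      throws IOException, InterruptedException {
    ZooKeeperWatcher zookeeper = new ZooKeeperWatcher(conf,
        "hbase-admin-on-" + connection.toString(), // ZK_IDENTIFIER_PREFIX + connection
        new Abortable() {                          // same idea as ThrowableAbortable above
          @Override
          public void abort(String why, Throwable e) {
            throw new RuntimeException(why, e);
          }
          @Override
          public boolean isAborted() {
            return true;
          }
        });
    try {
      return MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
    } finally {
      zookeeper.close(); // always release the short-lived watcher
    }
  }
}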

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index 8e27dfd..77e9a5e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@@ -49,7 +48,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
  * connections are managed at a lower level.
  *
  * <p>HConnections are used by {@link HTable} mostly but also by
- * {@link HBaseAdmin}, and {@link CatalogTracker}.  HConnection instances can be shared.  Sharing
+ * {@link HBaseAdmin}, and {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}.
+ * HConnection instances can be shared.  Sharing
  * is usually what you want because rather than each HConnection instance
  * having to do its own discovery of regions out on the cluster, instead, all
  * clients get to share the one cache of locations.  {@link HConnectionManager} does the
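
The sharing described in this javadoc, as a minimal hedged sketch (table names are placeholders; HConnectionManager.createConnection and HConnection.getTable are assumed as the entry points of this era): several table handles ride on one HConnection and therefore on one region location cache.

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;

// Illustrative: one shared HConnection backing multiple table handles.
class SharedConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      HTableInterface orders = connection.getTable(TableName.valueOf("orders"));
      HTableInterface users = connection.getTable(TableName.valueOf("users"));
      // ... both handles reuse the connection's cached region locations ...
      orders.close();
      users.close();
    } finally {
      connection.close();
    }
  }
}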

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 08f4ee2..062779a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -33,11 +33,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
 
@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.util.ExceptionUtil;
  * see HBASE-5986, and {@link DefaultMetaScannerVisitor} for details. </p>
  */
 @InterfaceAudience.Private
-//TODO: merge this to MetaReader, get rid of it.
+//TODO: merge this to MetaTableAccessor, get rid of it.
 public class MetaScanner {
   private static final Log LOG = LogFactory.getLog(MetaScanner.class);
   /**
@@ -227,7 +227,7 @@ public class MetaScanner {
    * table Result.
    * @param data a Result object from the catalog table scan
    * @return HRegionInfo or null
-   * @deprecated Use {@link MetaReader#getRegionLocations(Result)}
+   * @deprecated Use {@link org.apache.hadoop.hbase.MetaTableAccessor#getRegionLocations(Result)}
    */
   @Deprecated
   public static HRegionInfo getHRegionInfo(Result data) {
@@ -252,7 +252,7 @@ public class MetaScanner {
             return true;
           }
 
-          RegionLocations locations = MetaReader.getRegionLocations(result);
+          RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
           if (locations == null) return true;
           for (HRegionLocation loc : locations.getRegionLocations()) {
             if (loc != null) {
@@ -285,7 +285,7 @@ public class MetaScanner {
     MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
       @Override
       public boolean processRowInternal(Result result) throws IOException {
-        RegionLocations locations = MetaReader.getRegionLocations(result);
+        RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
         if (locations == null) return true;
         for (HRegionLocation loc : locations.getRegionLocations()) {
           if (loc != null) {
@@ -309,7 +309,7 @@ public class MetaScanner {
     MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
       @Override
       public boolean processRowInternal(Result result) throws IOException {
-        RegionLocations locations = MetaReader.getRegionLocations(result);
+        RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
         if (locations == null) return true;
         regions.add(locations);
         return true;
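
A hedged sketch of the visitor shape these MetaScanner hunks rely on; only MetaTableAccessor.getRegionLocations(Result) is taken from the patch, the visitor class itself is illustrative and assumes MetaScannerVisitorBase is visible to the caller (HBaseAdmin builds the same shape further up).

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.client.Result;

// Illustrative visitor: decode the region locations carried by each catalog row.
class LocationPrintingVisitor extends MetaScannerVisitorBase {
  @Override
  public boolean processRow(Result rowResult) throws IOException {
    RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
    if (list == null) {
      return true; // row without a serialized HRegionInfo; keep scanning
    }
    for (HRegionLocation loc : list.getRegionLocations()) {
      if (loc != null) {
        System.out.println(loc.getRegionInfo().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
    return true; // continue the scan
  }
}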

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index ca7ce68..9123d50 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -57,7 +57,7 @@ class ZooKeeperRegistry implements Registry {
       if (LOG.isTraceEnabled()) {
         LOG.trace("Looking up meta region location in ZK," + " connection=" + this);
       }
-      ServerName servername = MetaRegionTracker.blockUntilAvailable(zkw, hci.rpcTimeout);
+      ServerName servername = new MetaTableLocator().blockUntilAvailable(zkw, hci.rpcTimeout);
       if (LOG.isTraceEnabled()) {
         LOG.trace("Looked up meta region location, connection=" + this +
           "; serverName=" + ((servername == null) ? "null" : servername));

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
deleted file mode 100644
index a3a7d02..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Tracks the meta region server location node in zookeeper.
- * Meta region location is set by <code>RegionServerServices</code>.
- * This class has a watcher on the meta location and notices changes.
- */
-@InterfaceAudience.Private
-public class MetaRegionTracker extends ZooKeeperNodeTracker {
-  /**
-   * Creates a meta region location tracker.
-   *
-   * <p>After construction, use {@link #start} to kick off tracking.
-   *
-   * @param watcher
-   * @param abortable
-   */
-  public MetaRegionTracker(ZooKeeperWatcher watcher, Abortable abortable) {
-    super(watcher, watcher.metaServerZNode, abortable);
-  }
-
-  /**
-   * Checks if the meta region location is available.
-   * @return true if meta region location is available, false if not
-   */
-  public boolean isLocationAvailable() {
-    return super.getData(true) != null;
-  }
-
-  /**
-   * Gets the meta region location, if available.  Does not block.  Sets a watcher.
-   * @return server name or null if we failed to get the data.
-   * @throws InterruptedException
-   */
-  public ServerName getMetaRegionLocation() throws InterruptedException {
-    try {
-      return ServerName.parseFrom(super.getData(true));
-    } catch (DeserializationException e) {
-      LOG.warn("Failed parse", e);
-      return null;
-    }
-  }
-
-  /**
-   * Gets the meta region location, if available.  Does not block.  Does not set
-   * a watcher (In this regard it differs from {@link #getMetaRegionLocation}.
-   * @param zkw
-   * @return server name or null if we failed to get the data.
-   * @throws KeeperException
-   */
-  public static ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw)
-  throws KeeperException {
-    try {
-      return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.metaServerZNode));
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      return null;
-    }
-  }
-
-  /**
-   * Gets the meta region location, if available, and waits for up to the
-   * specified timeout if not immediately available.
-   * Given the zookeeper notification could be delayed, we will try to
-   * get the latest data.
-   * @param timeout maximum time to wait, in millis
-   * @return server name for server hosting meta region formatted as per
-   * {@link ServerName}, or null if none available
-   * @throws InterruptedException if interrupted while waiting
-   */
-  public ServerName waitMetaRegionLocation(long timeout)
-  throws InterruptedException {
-    if (false == checkIfBaseNodeAvailable()) {
-      String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
-          + "There could be a mismatch with the one configured in the master.";
-      LOG.error(errorMsg);
-      throw new IllegalArgumentException(errorMsg);
-    }
-    try {
-      return ServerName.parseFrom(super.blockUntilAvailable(timeout, true));
-    } catch (DeserializationException e) {
-      LOG.warn("Failed parse", e);
-      return null;
-    }
-  }
-
-  /**
-   * Sets the location of <code>hbase:meta</code> in ZooKeeper to the
-   * specified server address.
-   * @param zookeeper zookeeper reference
-   * @param location The server hosting <code>hbase:meta</code>
-   * @throws KeeperException unexpected zookeeper exception
-   */
-  public static void setMetaLocation(ZooKeeperWatcher zookeeper,
-                                     final ServerName location)
-  throws KeeperException {
-    LOG.info("Setting hbase:meta region location in ZooKeeper as " + location);
-    // Make the MetaRegionServer pb and then get its bytes and save this as
-    // the znode content.
-    byte [] data = toByteArray(location);
-    try {
-      ZKUtil.createAndWatch(zookeeper, zookeeper.metaServerZNode, data);
-    } catch(KeeperException.NodeExistsException nee) {
-      LOG.debug("META region location already existed, updated location");
-      ZKUtil.setData(zookeeper, zookeeper.metaServerZNode, data);
-    }
-  }
-
-  /**
-   * Build up the znode content.
-   * @param sn What to put into the znode.
-   * @return The content of the meta-region-server znode
-   */
-  static byte [] toByteArray(final ServerName sn) {
-    // ZNode content is a pb message preceded by some pb magic.
-    HBaseProtos.ServerName pbsn =
-      HBaseProtos.ServerName.newBuilder()
-                            .setHostName(sn.getHostname())
-                            .setPort(sn.getPort())
-                            .setStartCode(sn.getStartcode())
-                            .build();
-
-    ZooKeeperProtos.MetaRegionServer pbrsr =
-      ZooKeeperProtos.MetaRegionServer.newBuilder()
-                                      .setServer(pbsn)
-                                      .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
-                                      .build();
-    return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
-  }
-
-  /**
-   * Deletes the location of <code>hbase:meta</code> in ZooKeeper.
-   * @param zookeeper zookeeper reference
-   * @throws KeeperException unexpected zookeeper exception
-   */
-  public static void deleteMetaLocation(ZooKeeperWatcher zookeeper)
-  throws KeeperException {
-    LOG.info("Unsetting hbase:meta region location in ZooKeeper");
-    try {
-      // Just delete the node.  Don't need any watches.
-      ZKUtil.deleteNode(zookeeper, zookeeper.metaServerZNode);
-    } catch(KeeperException.NoNodeException nne) {
-      // Has already been deleted
-    }
-  }
-
-  /**
-   * Wait until the meta region is available.
-   * @param zkw
-   * @param timeout
-   * @return ServerName or null if we timed out.
-   * @throws InterruptedException
-   */
-  public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
-      final long timeout)
-  throws InterruptedException {
-    byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.metaServerZNode, timeout);
-    if (data == null) return null;
-    try {
-      return ServerName.parseFrom(data);
-    } catch (DeserializationException e) {
-      LOG.warn("Failed parse", e);
-      return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index a68d8c9..8f2a9cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent;
@@ -1590,7 +1589,8 @@ public class ZKUtil {
                                               zkw.backupMasterAddressesZNode)) {
         sb.append("\n ").append(child);
       }
-      sb.append("\nRegion server holding hbase:meta: " + MetaRegionTracker.getMetaRegionLocation(zkw));
+      sb.append("\nRegion server holding hbase:meta: "
+        + new MetaTableLocator().getMetaRegionLocation(zkw));
       sb.append("\nRegion servers:");
       for (String child : listChildrenNoWatch(zkw, zkw.rsZNode)) {
         sb.append("\n ").append(child);

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
deleted file mode 100644
index 191251c..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ /dev/null
@@ -1,585 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * Writes region and assignment information to <code>hbase:meta</code>.
- */
-@InterfaceAudience.Private
-public class MetaEditor extends MetaReader {
-
-  // TODO: Strip CatalogTracker from this class.  Its all over and in the end
-  // its only used to get its Configuration so we can get associated
-  // Connection.
-  private static final Log LOG = LogFactory.getLog(MetaEditor.class);
-
-  /**
-   * Generates and returns a Put containing the region into for the catalog table
-   */
-  public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
-  throws IOException {
-    Put put = new Put(regionInfo.getRegionName());
-    addRegionInfo(put, regionInfo);
-    return put;
-  }
-
-  /**
-   * Generates and returns a Delete containing the region info for the catalog
-   * table
-   */
-  public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
-    if (regionInfo == null) {
-      throw new IllegalArgumentException("Can't make a delete for null region");
-    }
-    Delete delete = new Delete(regionInfo.getRegionName());
-    return delete;
-  }
-
-  /**
-   * Adds split daughters to the Put
-   */
-  public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
-    if (splitA != null) {
-      put.addImmutable(
-          HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
-    }
-    if (splitB != null) {
-      put.addImmutable(
-          HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
-    }
-    return put;
-  }
-
-  /**
-   * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param p Put to add to hbase:meta
-   * @throws IOException
-   */
-  static void putToMetaTable(final CatalogTracker ct, final Put p)
-  throws IOException {
-    put(MetaReader.getMetaHTable(ct), p);
-  }
-
-  /**
-   * Put the passed <code>p</code> to a catalog table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param p Put to add
-   * @throws IOException
-   */
-  static void putToCatalogTable(final CatalogTracker ct, final Put p)
-  throws IOException {
-    put(MetaReader.getCatalogHTable(ct), p);
-  }
-
-  /**
-   * @param t Table to use (will be closed when done).
-   * @param p
-   * @throws IOException
-   */
-  private static void put(final HTable t, final Put p) throws IOException {
-    try {
-      t.put(p);
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
-   * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param ps Put to add to hbase:meta
-   * @throws IOException
-   */
-  public static void putsToMetaTable(final CatalogTracker ct, final List<Put> ps)
-  throws IOException {
-    HTable t = MetaReader.getMetaHTable(ct);
-    try {
-      t.put(ps);
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
-   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param d Delete to add to hbase:meta
-   * @throws IOException
-   */
-  static void deleteFromMetaTable(final CatalogTracker ct, final Delete d)
-      throws IOException {
-    List<Delete> dels = new ArrayList<Delete>(1);
-    dels.add(d);
-    deleteFromMetaTable(ct, dels);
-  }
-
-  /**
-   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-   * @throws IOException
-   */
-  public static void deleteFromMetaTable(final CatalogTracker ct, final List<Delete> deletes)
-      throws IOException {
-    HTable t = MetaReader.getMetaHTable(ct);
-    try {
-      t.delete(deletes);
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
-   * Deletes some replica columns corresponding to replicas for the passed rows
-   * @param metaRows
-   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-   * @param numReplicasToRemove
-   * @param ct
-   * @throws IOException
-   */
-  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows, int replicaIndexToDeleteFrom,
-      int numReplicasToRemove, CatalogTracker ct) throws IOException {
-    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
-    for (byte[] row : metaRows) {
-      Delete deleteReplicaLocations = new Delete(row);
-      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-        deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
-            MetaReader.getServerColumn(i));
-        deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
-            MetaReader.getSeqNumColumn(i));
-        deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
-            MetaReader.getStartCodeColumn(i));
-      }
-      deleteFromMetaTable(ct, deleteReplicaLocations);
-    }
-  }
-
-  /**
-   * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
-   * @param ct CatalogTracker on whose back we will ride the edit.
-   * @param mutations Puts and Deletes to execute on hbase:meta
-   * @throws IOException
-   */
-  public static void mutateMetaTable(final CatalogTracker ct, final List<Mutation> mutations)
-      throws IOException {
-    HTable t = MetaReader.getMetaHTable(ct);
-    try {
-      t.batch(mutations);
-    } catch (InterruptedException e) {
-      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
-      ie.initCause(e);
-      throw ie;
-    } finally {
-      t.close();
-    }
-  }
-
-  /**
-   * Adds a hbase:meta row for the specified new region.
-   * @param regionInfo region information
-   * @throws IOException if problem connecting or updating meta
-   */
-  public static void addRegionToMeta(CatalogTracker catalogTracker,
-      HRegionInfo regionInfo)
-  throws IOException {
-    putToMetaTable(catalogTracker, makePutFromRegionInfo(regionInfo));
-    LOG.info("Added " + regionInfo.getRegionNameAsString());
-  }
-
-  /**
-   * Adds a hbase:meta row for the specified new region to the given catalog table. The
-   * HTable is not flushed or closed.
-   * @param meta the HTable for META
-   * @param regionInfo region information
-   * @throws IOException if problem connecting or updating meta
-   */
-  public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo) throws IOException {
-    addRegionToMeta(meta, regionInfo, null, null);
-  }
-
-  /**
-   * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
-   * not add its daughter's as different rows, but adds information about the daughters
-   * in the same row as the parent. Use
-   * {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
-   * if you want to do that.
-   * @param meta the HTable for META
-   * @param regionInfo region information
-   * @param splitA first split daughter of the parent regionInfo
-   * @param splitB second split daughter of the parent regionInfo
-   * @throws IOException if problem connecting or updating meta
-   */
-  public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo,
-      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
-    Put put = makePutFromRegionInfo(regionInfo);
-    addDaughtersToPut(put, splitA, splitB);
-    meta.put(put);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Added " + regionInfo.getRegionNameAsString());
-    }
-  }
-
-  /**
-   * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
-   * not add its daughter's as different rows, but adds information about the daughters
-   * in the same row as the parent. Use
-   * {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
-   * if you want to do that.
-   * @param catalogTracker CatalogTracker on whose back we will ride the edit.
-   * @param regionInfo region information
-   * @param splitA first split daughter of the parent regionInfo
-   * @param splitB second split daughter of the parent regionInfo
-   * @throws IOException if problem connecting or updating meta
-   */
-  public static void addRegionToMeta(CatalogTracker catalogTracker, HRegionInfo regionInfo,
-      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
-    HTable meta = MetaReader.getMetaHTable(catalogTracker);
-    try {
-      addRegionToMeta(meta, regionInfo, splitA, splitB);
-    } finally {
-      meta.close();
-    }
-  }
-
-  /**
-   * Adds a hbase:meta row for each of the specified new regions.
-   * @param catalogTracker CatalogTracker
-   * @param regionInfos region information list
-   * @throws IOException if problem connecting or updating meta
-   */
-  public static void addRegionsToMeta(CatalogTracker catalogTracker,
-      List<HRegionInfo> regionInfos)
-  throws IOException {
-    List<Put> puts = new ArrayList<Put>();
-    for (HRegionInfo regionInfo : regionInfos) {
-      if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
-        puts.add(makePutFromRegionInfo(regionInfo));
-      }
-    }
-    putsToMetaTable(catalogTracker, puts);
-    LOG.info("Added " + puts.size());
-  }
-
-  /**
-   * Adds a daughter region entry to meta.
-   * @param regionInfo the region to put
-   * @param sn the location of the region
-   * @param openSeqNum the latest sequence number obtained when the region was open
-   */
-  public static void addDaughter(final CatalogTracker catalogTracker,
-      final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
-  throws NotAllMetaRegionsOnlineException, IOException {
-    Put put = new Put(regionInfo.getRegionName());
-    addRegionInfo(put, regionInfo);
-    if (sn != null) {
-      addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
-    }
-    putToMetaTable(catalogTracker, put);
-    LOG.info("Added daughter " + regionInfo.getEncodedName() +
-      (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
-  }
-
-  /**
-   * Merge the two regions into one in an atomic operation. Deletes the two
-   * merging regions in hbase:meta and adds the merged region with the information of
-   * two merging regions.
-   * @param catalogTracker the catalog tracker
-   * @param mergedRegion the merged region
-   * @param regionA
-   * @param regionB
-   * @param sn the location of the region
-   * @throws IOException
-   */
-  public static void mergeRegions(final CatalogTracker catalogTracker,
-      HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
-      ServerName sn) throws IOException {
-    HTable meta = MetaReader.getMetaHTable(catalogTracker);
-    try {
-      HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
-
-      // Put for parent
-      Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
-      putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
-          regionA.toByteArray());
-      putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
-          regionB.toByteArray());
-
-      // Deletes for merging regions
-      Delete deleteA = makeDeleteFromRegionInfo(regionA);
-      Delete deleteB = makeDeleteFromRegionInfo(regionB);
-
-      // The merged is a new region, openSeqNum = 1 is fine.
-      addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
-
-      byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
-          + HConstants.DELIMITER);
-      multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
-    } finally {
-      meta.close();
-    }
-  }
-
-  /**
-   * Splits the region into two in an atomic operation. Offlines the parent
-   * region with the information that it is split into two, and also adds
-   * the daughter regions. Does not add the location information to the daughter
-   * regions since they are not open yet.
-   * @param catalogTracker the catalog tracker
-   * @param parent the parent region which is split
-   * @param splitA Split daughter region A
-   * @param splitB Split daughter region A
-   * @param sn the location of the region
-   */
-  public static void splitRegion(final CatalogTracker catalogTracker,
-      HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
-      ServerName sn) throws IOException {
-    HTable meta = MetaReader.getMetaHTable(catalogTracker);
-    try {
-      HRegionInfo copyOfParent = new HRegionInfo(parent);
-      copyOfParent.setOffline(true);
-      copyOfParent.setSplit(true);
-
-      //Put for parent
-      Put putParent = makePutFromRegionInfo(copyOfParent);
-      addDaughtersToPut(putParent, splitA, splitB);
-
-      //Puts for daughters
-      Put putA = makePutFromRegionInfo(splitA);
-      Put putB = makePutFromRegionInfo(splitB);
-
-      addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
-      addLocation(putB, sn, 1, splitB.getReplicaId());
-
-      byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
-      multiMutate(meta, tableRow, putParent, putA, putB);
-    } finally {
-      meta.close();
-    }
-  }
-
-  /**
-   * Performs an atomic multi-Mutate operation against the given table.
-   */
-  private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
-    CoprocessorRpcChannel channel = table.coprocessorService(row);
-    MutateRowsRequest.Builder mmrBuilder = MutateRowsRequest.newBuilder();
-    for (Mutation mutation : mutations) {
-      if (mutation instanceof Put) {
-        mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, mutation));
-      } else if (mutation instanceof Delete) {
-        mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, mutation));
-      } else {
-        throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
-            + mutation.getClass().getName());
-      }
-    }
-
-    MultiRowMutationService.BlockingInterface service =
-        MultiRowMutationService.newBlockingStub(channel);
-    try {
-      service.mutateRows(null, mmrBuilder.build());
-    } catch (ServiceException ex) {
-      ProtobufUtil.toIOException(ex);
-    }
-  }
-
-  /**
-   * Updates the location of the specified region in hbase:meta to be the specified
-   * server hostname and startcode.
-   * <p>
-   * Uses passed catalog tracker to get a connection to the server hosting
-   * hbase:meta and makes edits to that region.
-   *
-   * @param catalogTracker catalog tracker
-   * @param regionInfo region to update location of
-   * @param sn Server name
-   * @throws IOException
-   */
-  public static void updateRegionLocation(CatalogTracker catalogTracker,
-      HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
-  throws IOException {
-    updateLocation(catalogTracker, regionInfo, sn, updateSeqNum);
-  }
-
-  /**
-   * Updates the location of the specified region to be the specified server.
-   * <p>
-   * Connects to the specified server which should be hosting the specified
-   * catalog region name to perform the edit.
-   *
-   * @param catalogTracker
-   * @param regionInfo region to update location of
-   * @param sn Server name
-   * @param openSeqNum the latest sequence number obtained when the region was open
-   * @throws IOException In particular could throw {@link java.net.ConnectException}
-   * if the server is down on other end.
-   */
-  private static void updateLocation(final CatalogTracker catalogTracker,
-      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
-  throws IOException {
-    // region replicas are kept in the primary region's row
-    Put put = new Put(getMetaKeyForRegion(regionInfo));
-    addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
-    putToCatalogTable(catalogTracker, put);
-    LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
-      " with server=" + sn);
-  }
-
-  /**
-   * Deletes the specified region from META.
-   * @param catalogTracker
-   * @param regionInfo region to be deleted from META
-   * @throws IOException
-   */
-  public static void deleteRegion(CatalogTracker catalogTracker,
-      HRegionInfo regionInfo)
-  throws IOException {
-    Delete delete = new Delete(regionInfo.getRegionName());
-    deleteFromMetaTable(catalogTracker, delete);
-    LOG.info("Deleted " + regionInfo.getRegionNameAsString());
-  }
-
-  /**
-   * Deletes the specified regions from META.
-   * @param catalogTracker
-   * @param regionsInfo list of regions to be deleted from META
-   * @throws IOException
-   */
-  public static void deleteRegions(CatalogTracker catalogTracker,
-      List<HRegionInfo> regionsInfo) throws IOException {
-    List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
-    for (HRegionInfo hri: regionsInfo) {
-      deletes.add(new Delete(hri.getRegionName()));
-    }
-    deleteFromMetaTable(catalogTracker, deletes);
-    LOG.info("Deleted " + regionsInfo);
-  }
-
-  /**
-   * Adds and Removes the specified regions from hbase:meta
-   * @param catalogTracker
-   * @param regionsToRemove list of regions to be deleted from META
-   * @param regionsToAdd list of regions to be added to META
-   * @throws IOException
-   */
-  public static void mutateRegions(CatalogTracker catalogTracker,
-      final List<HRegionInfo> regionsToRemove, final List<HRegionInfo> regionsToAdd)
-      throws IOException {
-    List<Mutation> mutation = new ArrayList<Mutation>();
-    if (regionsToRemove != null) {
-      for (HRegionInfo hri: regionsToRemove) {
-        mutation.add(new Delete(hri.getRegionName()));
-      }
-    }
-    if (regionsToAdd != null) {
-      for (HRegionInfo hri: regionsToAdd) {
-        mutation.add(makePutFromRegionInfo(hri));
-      }
-    }
-    mutateMetaTable(catalogTracker, mutation);
-    if (regionsToRemove != null && regionsToRemove.size() > 0) {
-      LOG.debug("Deleted " + regionsToRemove);
-    }
-    if (regionsToAdd != null && regionsToAdd.size() > 0) {
-      LOG.debug("Added " + regionsToAdd);
-    }
-  }
-
-  /**
-   * Overwrites the specified regions from hbase:meta
-   * @param catalogTracker
-   * @param regionInfos list of regions to be added to META
-   * @throws IOException
-   */
-  public static void overwriteRegions(CatalogTracker catalogTracker,
-      List<HRegionInfo> regionInfos) throws IOException {
-    deleteRegions(catalogTracker, regionInfos);
-    // Why sleep? This is the easiest way to ensure that the previous deletes do not
-    // eclipse the following puts, which might happen at the same timestamp on the server.
-    // See HBASE-9906 and HBASE-9879. Once either HBASE-9879 or HBASE-8770 is fixed,
-    // or HBASE-9905 is fixed and meta uses seqIds, the sleep is no longer needed.
-    Threads.sleep(20);
-    addRegionsToMeta(catalogTracker, regionInfos);
-    LOG.info("Overwritten " + regionInfos);
-  }
-
-  /**
-   * Deletes merge qualifiers for the specified merged region.
-   * @param catalogTracker
-   * @param mergedRegion
-   * @throws IOException
-   */
-  public static void deleteMergeQualifiers(CatalogTracker catalogTracker,
-      final HRegionInfo mergedRegion) throws IOException {
-    Delete delete = new Delete(mergedRegion.getRegionName());
-    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
-    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
-    deleteFromMetaTable(catalogTracker, delete);
-    LOG.info("Deleted references in merged region "
-        + mergedRegion.getRegionNameAsString() + ", qualifier="
-        + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
-        + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
-  }
-
-  private static Put addRegionInfo(final Put p, final HRegionInfo hri)
-  throws IOException {
-    p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        hri.toByteArray());
-    return p;
-  }
-
-  public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
-    p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getServerColumn(replicaId),
-      Bytes.toBytes(sn.getHostAndPort()));
-    p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getStartCodeColumn(replicaId),
-      Bytes.toBytes(sn.getStartcode()));
-    p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getSeqNumColumn(replicaId),
-        Bytes.toBytes(openSeqNum));
-    return p;
-  }
-}
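
For callers tracking this API move: the MetaEditor helpers removed above took a CatalogTracker, while their MetaTableAccessor counterparts take a connection, as the CatalogJanitor hunk later in this patch shows. Below is a minimal sketch of deleting a region's row through the new entry point, assuming MetaTableAccessor.deleteRegion keeps the (connection, regionInfo) shape seen in this diff; the class, table name and region are illustrative only.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

public class DeleteRegionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The CatalogTracker argument of the old MetaEditor.deleteRegion is replaced
    // by a connection, matching the (connection, regionInfo) call seen in the
    // CatalogJanitor hunk of this patch.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      // Hypothetical region of a hypothetical table, for illustration only.
      HRegionInfo hri = new HRegionInfo(TableName.valueOf("usertable"));
      MetaTableAccessor.deleteRegion(connection, hri);
    } finally {
      connection.close();
    }
  }
}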

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
deleted file mode 100644
index f623c4b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.catalog;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * A tool to migrate the data stored in hbase:meta table to pbuf serialization.
- * Supports migrating from 0.92.x and 0.94.x to 0.96.x for the catalog table.
- * @deprecated will be removed for the major release after 0.96.
- */
-@Deprecated
-public class MetaMigrationConvertingToPB {
-
-  private static final Log LOG = LogFactory.getLog(MetaMigrationConvertingToPB.class);
-
-  private static class ConvertToPBMetaVisitor implements Visitor {
-    private final MasterServices services;
-    private long numMigratedRows;
-
-    public ConvertToPBMetaVisitor(MasterServices services) {
-      this.services = services;
-      numMigratedRows = 0;
-    }
-
-    @Override
-    public boolean visit(Result r) throws IOException {
-      if (r ==  null || r.isEmpty()) return true;
-      // Check info:regioninfo, info:splitA, and info:splitB.  Make sure all
-      // have migrated HRegionInfos.
-      byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
-      // Presumes that an edit updating all three cells either succeeds or
-      // doesn't -- that we don't have case of info:regioninfo migrated but not
-      // info:splitA.
-      if (isMigrated(hriBytes)) return true;
-      // OK. Need to migrate this row in meta.
-
-      //This will 'migrate' the HRI from 092.x and 0.94.x to 0.96+ by reading the
-      //writable serialization
-      HRegionInfo hri = parseFrom(hriBytes);
-
-      // Now make a put to write back to meta.
-      Put p =  MetaEditor.makePutFromRegionInfo(hri);
-
-      // Now migrate info:splitA and info:splitB if they are not null
-      migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
-      migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);
-
-      MetaEditor.putToCatalogTable(this.services.getCatalogTracker(), p);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Migrated " + Bytes.toString(p.getRow()));
-      }
-      numMigratedRows++;
-      return true;
-    }
-  }
-
-  static void migrateSplitIfNecessary(final Result r, final Put p, final byte [] which)
-      throws IOException {
-    byte [] hriSplitBytes = getBytes(r, which);
-    if (!isMigrated(hriSplitBytes)) {
-      //This will 'migrate' the HRI from 092.x and 0.94.x to 0.96+ by reading the
-      //writable serialization
-      HRegionInfo hri = parseFrom(hriSplitBytes);
-      p.addImmutable(HConstants.CATALOG_FAMILY, which, hri.toByteArray());
-    }
-  }
-
-  static HRegionInfo parseFrom(byte[] hriBytes) throws IOException {
-    try {
-      return HRegionInfo.parseFrom(hriBytes);
-    } catch (DeserializationException ex) {
-      throw new IOException(ex);
-    }
-  }
-
-  /**
-   * @param r Result to dig in.
-   * @param qualifier Qualifier to look at in the passed <code>r</code>.
-   * @return Bytes for an HRegionInfo or null if no bytes or empty bytes found.
-   */
-  static byte [] getBytes(final Result r, final byte [] qualifier) {
-    byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
-    if (hriBytes == null || hriBytes.length <= 0) return null;
-    return hriBytes;
-  }
-
-  static boolean isMigrated(final byte [] hriBytes) {
-    if (hriBytes == null || hriBytes.length <= 0) return true;
-
-    return ProtobufUtil.isPBMagicPrefix(hriBytes);
-  }
-
-  /**
-   * Converting writable serialization to PB, if it is needed.
-   * @param services MasterServices to get a handle on master
-   * @return num migrated rows
-   * @throws IOException or RuntimeException if something goes wrong
-   */
-  public static long updateMetaIfNecessary(final MasterServices services)
-  throws IOException {
-    if (isMetaTableUpdated(services.getCatalogTracker())) {
-      LOG.info("META already up-to date with PB serialization");
-      return 0;
-    }
-    LOG.info("META has Writable serializations, migrating hbase:meta to PB serialization");
-    try {
-      long rows = updateMeta(services);
-      LOG.info("META updated with PB serialization. Total rows updated: " + rows);
-      return rows;
-    } catch (IOException e) {
-      LOG.warn("Update hbase:meta with PB serialization failed." + "Master startup aborted.");
-      throw e;
-    }
-  }
-
-  /**
-   * Update hbase:meta rows, converting writable serialization to PB
-   * @return num migrated rows
-   */
-  static long updateMeta(final MasterServices masterServices) throws IOException {
-    LOG.info("Starting update of META");
-    ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
-    MetaReader.fullScan(masterServices.getCatalogTracker(), v);
-    LOG.info("Finished update of META. Total rows updated:" + v.numMigratedRows);
-    return v.numMigratedRows;
-  }
-
-  /**
-   * @param catalogTracker the catalog tracker
-   * @return True if the meta table has been migrated.
-   * @throws IOException
-   */
-  static boolean isMetaTableUpdated(final CatalogTracker catalogTracker) throws IOException {
-    List<Result> results = MetaReader.fullScanOfMeta(catalogTracker);
-    if (results == null || results.isEmpty()) {
-      LOG.info("hbase:meta doesn't have any entries to update.");
-      return true;
-    }
-    for (Result r : results) {
-      byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-      if (!isMigrated(value)) {
-        return false;
-      }
-    }
-    return true;
-  }
-}
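
The deleted migration tool keyed its already-migrated decision on the protobuf magic prefix: a null or empty info:regioninfo cell counts as "nothing to migrate", anything else is migrated only if it carries the PB magic bytes. A standalone sketch of that check, using ProtobufUtil.isPBMagicPrefix exactly as the removed code did; the wrapper class and main method are illustrative.

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

public class PBMagicCheckSketch {
  // Mirrors isMigrated() from the deleted MetaMigrationConvertingToPB.
  static boolean isMigrated(byte[] hriBytes) {
    if (hriBytes == null || hriBytes.length == 0) {
      return true; // nothing stored, so nothing to convert
    }
    return ProtobufUtil.isPBMagicPrefix(hriBytes);
  }

  public static void main(String[] args) {
    System.out.println(isMigrated(null));              // true: empty cell, skip
    System.out.println(isMigrated(new byte[] {0x01})); // false: pre-0.96 Writable bytes
  }
}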

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 7e7ba76..118f2ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
@@ -212,7 +212,7 @@ public class TableMapReduceUtil {
         MutationSerialization.class.getName(), ResultSerialization.class.getName());
     if (partitioner == HRegionPartitioner.class) {
       job.setPartitionerClass(HRegionPartitioner.class);
-      int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+      int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
       if (job.getNumReduceTasks() > regions) {
         job.setNumReduceTasks(regions);
       }
@@ -278,7 +278,7 @@ public class TableMapReduceUtil {
    */
   public static void limitNumReduceTasks(String table, JobConf job)
   throws IOException {
-    int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+    int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
     if (job.getNumReduceTasks() > regions)
       job.setNumReduceTasks(regions);
   }
@@ -293,7 +293,7 @@ public class TableMapReduceUtil {
    */
   public static void limitNumMapTasks(String table, JobConf job)
   throws IOException {
-    int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+    int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
     if (job.getNumMapTasks() > regions)
       job.setNumMapTasks(regions);
   }
@@ -308,7 +308,7 @@ public class TableMapReduceUtil {
    */
   public static void setNumReduceTasks(String table, JobConf job)
   throws IOException {
-    job.setNumReduceTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
+    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
   }
 
   /**
@@ -321,7 +321,7 @@ public class TableMapReduceUtil {
    */
   public static void setNumMapTasks(String table, JobConf job)
   throws IOException {
-    job.setNumMapTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
+    job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
   }
 
   /**
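
The mapred-side change is a mechanical rename: region counts now come from MetaTableAccessor.getRegionCount instead of MetaReader, with the helper signatures unchanged. A usage sketch of the patched limitNumReduceTasks helper, assuming a reachable cluster and a hypothetical table named "usertable".

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class LimitReducersSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical table name; any existing table would do.
    String table = "usertable";
    JobConf job = new JobConf(HBaseConfiguration.create());
    job.setNumReduceTasks(100);
    // Caps the reduce count at the table's region count; after this patch the
    // count is read via MetaTableAccessor.getRegionCount rather than MetaReader.
    TableMapReduceUtil.limitNumReduceTasks(table, job);
    System.out.println("Reduce tasks: " + job.getNumReduceTasks());
  }
}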

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index facef82..71c60b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -635,7 +635,7 @@ public class TableMapReduceUtil {
     job.setOutputValueClass(Writable.class);
     if (partitioner == HRegionPartitioner.class) {
       job.setPartitionerClass(HRegionPartitioner.class);
-      int regions = MetaReader.getRegionCount(conf, table);
+      int regions = MetaTableAccessor.getRegionCount(conf, table);
       if (job.getNumReduceTasks() > regions) {
         job.setNumReduceTasks(regions);
       }
@@ -660,7 +660,7 @@ public class TableMapReduceUtil {
    */
   public static void limitNumReduceTasks(String table, Job job)
   throws IOException {
-    int regions = MetaReader.getRegionCount(job.getConfiguration(), table);
+    int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table);
     if (job.getNumReduceTasks() > regions)
       job.setNumReduceTasks(regions);
   }
@@ -675,7 +675,7 @@ public class TableMapReduceUtil {
    */
   public static void setNumReduceTasks(String table, Job job)
   throws IOException {
-    job.setNumReduceTasks(MetaReader.getRegionCount(job.getConfiguration(), table));
+    job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table));
   }
 
   /**
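
The new-API (mapreduce) variant gets the same substitution. A caller sizing reducers directly, rather than through the helper, would now ask MetaTableAccessor with the Job's configuration; a sketch under the same assumptions (hypothetical table name, live cluster).

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.mapreduce.Job;

public class RegionCountSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical table name; the region count is read from hbase:meta.
    String table = "usertable";
    Job job = Job.getInstance(HBaseConfiguration.create());
    int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table);
    // Same sizing rule the patched helpers apply.
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  }
}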

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index f407569..2888e1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -61,9 +61,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
@@ -101,7 +100,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Triple;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -139,8 +138,6 @@ public class AssignmentManager extends ZooKeeperListener {
 
   private boolean shouldAssignRegionsWithFavoredNodes;
 
-  private CatalogTracker catalogTracker;
-
   private LoadBalancer balancer;
 
   private final MetricsAssignmentManager metricsAssignmentManager;
@@ -254,22 +251,23 @@ public class AssignmentManager extends ZooKeeperListener {
   /**
    * Constructs a new assignment manager.
    *
-   * @param server
-   * @param serverManager
-   * @param catalogTracker
-   * @param service
+   * @param server instance of HMaster this AM is running inside
+   * @param serverManager serverManager for associated HMaster
+   * @param balancer implementation of {@link LoadBalancer}
+   * @param service Executor service
+   * @param metricsMaster metrics manager
+   * @param tableLockManager TableLock manager
    * @throws KeeperException
    * @throws IOException
    */
   public AssignmentManager(Server server, ServerManager serverManager,
-      CatalogTracker catalogTracker, final LoadBalancer balancer,
+      final LoadBalancer balancer,
       final ExecutorService service, MetricsMaster metricsMaster,
       final TableLockManager tableLockManager) throws KeeperException,
         IOException, CoordinatedStateException {
     super(server.getZooKeeper());
     this.server = server;
     this.serverManager = serverManager;
-    this.catalogTracker = catalogTracker;
     this.executorService = service;
     this.regionStateStore = new RegionStateStore(server);
     this.regionsToReopen = Collections.synchronizedMap
@@ -404,7 +402,8 @@ public class AssignmentManager extends ZooKeeperListener {
   public Pair<Integer, Integer> getReopenStatus(TableName tableName)
       throws IOException {
     List <HRegionInfo> hris =
-      MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
+      MetaTableAccessor.getTableRegions(this.watcher, this.server.getShortCircuitConnection(),
+        tableName, true);
     Integer pending = 0;
     for (HRegionInfo hri : hris) {
       String name = hri.getEncodedName();
@@ -759,7 +758,7 @@ public class AssignmentManager extends ZooKeeperListener {
       if (regionInfo.isMetaRegion()) {
         // If it's meta region, reset the meta location.
         // So that master knows the right meta region server.
-        MetaRegionTracker.setMetaLocation(watcher, sn);
+        MetaTableLocator.setMetaLocation(watcher, sn);
       } else {
         // No matter the previous server is online or offline,
         // we need to reset the last region server of the region.
@@ -1129,7 +1128,8 @@ public class AssignmentManager extends ZooKeeperListener {
       regionToFavoredNodes.put(region,
           ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
     }
-    FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
+    FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes,
+      this.server.getShortCircuitConnection());
   }
 
   /**
@@ -1152,7 +1152,8 @@ public class AssignmentManager extends ZooKeeperListener {
         } else {
           try {
             byte [] name = rt.getRegionName();
-            Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
+            Pair<HRegionInfo, ServerName> p = MetaTableAccessor.getRegion(
+              this.server.getShortCircuitConnection(), name);
             regionInfo = p.getFirst();
           } catch (IOException e) {
             LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
@@ -1935,13 +1936,15 @@ public class AssignmentManager extends ZooKeeperListener {
       final HRegionInfo region, final ServerName sn) {
     try {
       if (region.isMetaRegion()) {
-        ServerName server = catalogTracker.getMetaLocation();
+        ServerName server = this.server.getMetaTableLocator().
+          getMetaRegionLocation(this.server.getZooKeeper());
         return regionStates.isServerDeadAndNotProcessed(server);
       }
       while (!server.isStopped()) {
         try {
-          catalogTracker.waitForMeta();
-          Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
+          this.server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
+          Result r = MetaTableAccessor.getRegionResult(server.getShortCircuitConnection(),
+            region.getRegionName());
           if (r == null || r.isEmpty()) return false;
           ServerName server = HRegionInfo.getServerName(r);
           return regionStates.isServerDeadAndNotProcessed(server);
@@ -2554,7 +2557,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws KeeperException
    */
   public void assignMeta() throws KeeperException {
-    MetaRegionTracker.deleteMetaLocation(this.watcher);
+    this.server.getMetaTableLocator().deleteMetaLocation(this.watcher);
     assign(HRegionInfo.FIRST_META_REGIONINFO, true);
   }
 
@@ -2754,7 +2757,7 @@ public class AssignmentManager extends ZooKeeperListener {
       ZooKeeperProtos.Table.State.ENABLING);
 
     // Region assignment from META
-    List<Result> results = MetaReader.fullScan(this.catalogTracker);
+    List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection());
     // Get any new but slow to checkin region server that joined the cluster
     Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
     // Set of offline servers to be returned
@@ -2765,7 +2768,7 @@ public class AssignmentManager extends ZooKeeperListener {
         LOG.debug("null result from meta - ignoring but this is strange.");
         continue;
       }
-      RegionLocations rl =  MetaReader.getRegionLocations(result);
+      RegionLocations rl =  MetaTableAccessor.getRegionLocations(result);
       if (rl == null) continue;
       HRegionLocation[] locations = rl.getRegionLocations();
       if (locations == null) continue;
@@ -2826,7 +2829,7 @@ public class AssignmentManager extends ZooKeeperListener {
         LOG.info("The table " + tableName
             + " is in DISABLING state.  Hence recovering by moving the table"
             + " to DISABLED state.");
-        new DisableTableHandler(this.server, tableName, catalogTracker,
+        new DisableTableHandler(this.server, tableName,
             this, tableLockManager, true).prepare().process();
       }
     }
@@ -2853,7 +2856,7 @@ public class AssignmentManager extends ZooKeeperListener {
         // enableTable in sync way during master startup,
         // no need to invoke coprocessor
         EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
-          catalogTracker, this, tableLockManager, true);
+          this, tableLockManager, true);
         try {
           eth.prepare();
         } catch (TableNotFoundException e) {
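
In AssignmentManager the CatalogTracker's meta-location duties are split in two: ZooKeeper-based location handling moves to MetaTableLocator, and meta reads go through the server's short-circuit connection via MetaTableAccessor. A sketch of reading the meta location with MetaTableLocator against a standalone ZooKeeperWatcher, assuming the instance-method form used in the hunks above; the identifier string and the Abortable stub are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minimal Abortable stub, only to satisfy the watcher constructor.
    Abortable abortable = new Abortable() {
      @Override public void abort(String why, Throwable e) { }
      @Override public boolean isAborted() { return false; }
    };
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "meta-location-sketch", abortable);
    try {
      // Reads the meta location znode, as AssignmentManager now does through
      // server.getMetaTableLocator().getMetaRegionLocation(zk).
      ServerName metaLocation = new MetaTableLocator().getMetaRegionLocation(zkw);
      System.out.println("hbase:meta is on " + metaLocation);
    } finally {
      zkw.close();
    }
  }
}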

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a0a2a2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 1ffec8a..051002e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -40,8 +40,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.Result;
@@ -199,7 +198,8 @@ public class CatalogJanitor extends Chore {
           + " from fs because merged region no longer holds references");
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
-      MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
+      MetaTableAccessor.deleteMergeQualifiers(server.getShortCircuitConnection(),
+        mergedRegion);
       return true;
     }
     return false;
@@ -331,7 +331,7 @@ public class CatalogJanitor extends Chore {
       FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
       if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
       HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
-      MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
+      MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent);
       result = true;
     }
     return result;
@@ -403,9 +403,9 @@ public class CatalogJanitor extends Chore {
       throws IOException {
     // Get merge regions if it is a merged region and already has merge
     // qualifier
-    Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaReader
-        .getRegionsFromMergeQualifier(this.services.getCatalogTracker(),
-            region.getRegionName());
+    Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
+        .getRegionsFromMergeQualifier(this.services.getShortCircuitConnection(),
+          region.getRegionName());
     if (mergeRegions == null
         || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
       // It doesn't have merge qualifier, no need to clean