Posted to commits@phoenix.apache.org by ra...@apache.org on 2017/11/08 10:37:32 UTC

[2/3] phoenix git commit: PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively (Rajeshbabu)
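
For readers tracking the migration: the patch swaps the deprecated HBase
0.98/1.x client entry points (HConnection, HTableInterface) for the
Connection/Table API. A minimal sketch of the new style, assuming a generic
configuration and a placeholder table name (neither is taken from the patch):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class TableMigrationSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Pre-2.0 code obtained an HTableInterface from an HConnection;
            // the replacement goes through ConnectionFactory and Table.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("MY_TABLE"))) {
                // All reads and writes now go through the Table interface.
            }
        }
    }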

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
index f45b356..444bb5d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
@@ -22,13 +22,13 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -46,19 +48,14 @@ import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
-public class DelegateHTable implements HTableInterface {
-    protected final HTableInterface delegate;
+public class DelegateHTable implements Table {
+    protected final Table delegate;
 
-    public DelegateHTable(HTableInterface delegate) {
+    public DelegateHTable(Table delegate) {
         this.delegate = delegate;
     }
 
     @Override
-    public byte[] getTableName() {
-        return delegate.getTableName();
-    }
-
-    @Override
     public TableName getName() {
         return delegate.getName();
     }
@@ -79,34 +76,22 @@ public class DelegateHTable implements HTableInterface {
     }
 
     @Override
-    public Boolean[] exists(List<Get> gets) throws IOException {
-        return delegate.exists(gets);
+    public boolean[] existsAll(List<Get> gets) throws IOException {
+        return delegate.existsAll(gets);
     }
 
     @Override
-    public void batch(List<? extends Row> actions, Object[] results) throws IOException, InterruptedException {
+    public void batch(List<? extends Row> actions, Object[] results) throws IOException,
+            InterruptedException {
         delegate.batch(actions, results);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public Object[] batch(List<? extends Row> actions) throws IOException, InterruptedException {
-        return delegate.batch(actions);
-    }
-
     @Override
-    public <R> void batchCallback(List<? extends Row> actions, Object[] results, Callback<R> callback)
-            throws IOException, InterruptedException {
+    public <R> void batchCallback(List<? extends Row> actions, Object[] results,
+            Callback<R> callback) throws IOException, InterruptedException {
         delegate.batchCallback(actions, results, callback);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public <R> Object[] batchCallback(List<? extends Row> actions, Callback<R> callback) throws IOException,
-            InterruptedException {
-        return delegate.batchCallback(actions, callback);
-    }
-
     @Override
     public Result get(Get get) throws IOException {
         return delegate.get(get);
@@ -117,12 +102,6 @@ public class DelegateHTable implements HTableInterface {
         return delegate.get(gets);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
-        return delegate.getRowOrBefore(row, family);
-    }
-
     @Override
     public ResultScanner getScanner(Scan scan) throws IOException {
         return delegate.getScanner(scan);
@@ -149,11 +128,18 @@ public class DelegateHTable implements HTableInterface {
     }
 
     @Override
-    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException {
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+            throws IOException {
         return delegate.checkAndPut(row, family, qualifier, value, put);
     }
 
     @Override
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+            byte[] value, Put put) throws IOException {
+        return delegate.checkAndPut(row, family, qualifier, compareOp, value, put);
+    }
+
+    @Override
     public void delete(Delete delete) throws IOException {
         delegate.delete(delete);
     }
@@ -164,12 +150,18 @@ public class DelegateHTable implements HTableInterface {
     }
 
     @Override
-    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete)
-            throws IOException {
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
+            Delete delete) throws IOException {
         return delegate.checkAndDelete(row, family, qualifier, value, delete);
     }
 
     @Override
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+            byte[] value, Delete delete) throws IOException {
+        return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete);
+    }
+
+    @Override
     public void mutateRow(RowMutations rm) throws IOException {
         delegate.mutateRow(rm);
     }
@@ -185,33 +177,17 @@ public class DelegateHTable implements HTableInterface {
     }
 
     @Override
-    public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException {
+    public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
+            throws IOException {
         return delegate.incrementColumnValue(row, family, qualifier, amount);
     }
 
     @Override
-    public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability)
-            throws IOException {
+    public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
+            Durability durability) throws IOException {
         return delegate.incrementColumnValue(row, family, qualifier, amount, durability);
     }
 
-    @SuppressWarnings("deprecation")
-    @Override
-    public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL)
-            throws IOException {
-        return delegate.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
-    }
-
-    @Override
-    public boolean isAutoFlush() {
-        return delegate.isAutoFlush();
-    }
-
-    @Override
-    public void flushCommits() throws IOException {
-        delegate.flushCommits();
-    }
-
     @Override
     public void close() throws IOException {
         delegate.close();
@@ -223,98 +199,99 @@ public class DelegateHTable implements HTableInterface {
     }
 
     @Override
-    public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey,
-            Call<T, R> callable) throws ServiceException, Throwable {
+    public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service,
+            byte[] startKey, byte[] endKey, Call<T, R> callable) throws ServiceException, Throwable {
         return delegate.coprocessorService(service, startKey, endKey, callable);
     }
 
     @Override
-    public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey,
-            Call<T, R> callable, Callback<R> callback) throws ServiceException, Throwable {
+    public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey,
+            byte[] endKey, Call<T, R> callable, Callback<R> callback) throws ServiceException,
+            Throwable {
         delegate.coprocessorService(service, startKey, endKey, callable, callback);
     }
 
-    @SuppressWarnings("deprecation")
     @Override
-    public void setAutoFlush(boolean autoFlush) {
-        delegate.setAutoFlush(autoFlush);
+    public <R extends Message> Map<byte[], R> batchCoprocessorService(
+            MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
+            R responsePrototype) throws ServiceException, Throwable {
+        return delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype);
     }
 
     @Override
-    public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
-        delegate.setAutoFlush(autoFlush, clearBufferOnFail);
+    public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor,
+            Message request, byte[] startKey, byte[] endKey, R responsePrototype,
+            Callback<R> callback) throws ServiceException, Throwable {
+        delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, callback);
     }
 
     @Override
-    public void setAutoFlushTo(boolean autoFlush) {
-        delegate.setAutoFlushTo(autoFlush);
+    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+            byte[] value, RowMutations mutation) throws IOException {
+        return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
     }
 
     @Override
-    public long getWriteBufferSize() {
-        return delegate.getWriteBufferSize();
+    public void setOperationTimeout(int operationTimeout) {
+        delegate.setOperationTimeout(operationTimeout);
     }
 
     @Override
-    public void setWriteBufferSize(long writeBufferSize) throws IOException {
-        delegate.setWriteBufferSize(writeBufferSize);
+    public int getOperationTimeout() {
+        return delegate.getOperationTimeout();
     }
 
     @Override
-    public <R extends Message> Map<byte[], R> batchCoprocessorService(MethodDescriptor methodDescriptor,
-            Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
-        return delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype);
+    public int getRpcTimeout() {
+        return delegate.getRpcTimeout();
     }
 
     @Override
-    public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor, Message request,
-            byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback) throws ServiceException,
-            Throwable {
-        delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, callback);
+    public void setRpcTimeout(int rpcTimeout) {
+        delegate.setRpcTimeout(rpcTimeout);
     }
 
     @Override
-    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, byte[] value,
-            RowMutations mutation) throws IOException {
-        return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
+    public TableDescriptor getDescriptor() throws IOException {
+        return delegate.getDescriptor();
     }
 
     @Override
-    public void setOperationTimeout(int i) {
-        delegate.setOperationTimeout(i);
+    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+            byte[] value, Put put) throws IOException {
+        return delegate.checkAndPut(row, family, qualifier, op, value, put);
     }
 
     @Override
-    public int getOperationTimeout() {
-        return delegate.getOperationTimeout();
+    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+            byte[] value, Delete delete) throws IOException {
+        return delegate.checkAndDelete(row, family, qualifier, op, value, delete);
     }
 
     @Override
-    public void setRpcTimeout(int i) {
-        delegate.setRpcTimeout(i);
+    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+            byte[] value, RowMutations mutation) throws IOException {
+        return delegate.checkAndMutate(row, family, qualifier, op, value, mutation);
     }
 
     @Override
-    public int getRpcTimeout() {
-        return delegate.getRpcTimeout();
+    public int getReadRpcTimeout() {
+        return delegate.getReadRpcTimeout();
     }
 
     @Override
-	public boolean[] existsAll(List<Get> gets) throws IOException {
-		return delegate.existsAll(gets);
-	}
-
-	@Override
-	public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-			CompareOp compareOp, byte[] value, Put put) throws IOException {
-		return delegate.checkAndPut(row, family, qualifier, value, put);
-	}
+    public void setReadRpcTimeout(int readRpcTimeout) {
+        delegate.setReadRpcTimeout(readRpcTimeout);
+    }
 
-	@Override
-	public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-			CompareOp compareOp, byte[] value, Delete delete)
-			throws IOException {
-		return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete);
-	}
+    @Override
+    public int getWriteRpcTimeout() {
+        return delegate.getWriteRpcTimeout();
+    }
 
+    @Override
+    public void setWriteRpcTimeout(int writeRpcTimeout) {
+        delegate.setWriteRpcTimeout(writeRpcTimeout);
+    }
 }
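
The CompareOperator overloads added above sit alongside the legacy CompareOp
ones during the transition. A hedged usage sketch of the new overload (class,
family, and qualifier names are illustrative, not from the patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndPutSketch {
        // Atomically update a cell only if it still holds the expected value.
        static boolean compareAndSet(Table table, byte[] row, byte[] expected,
                byte[] next) throws IOException {
            byte[] fam = Bytes.toBytes("f");
            byte[] qual = Bytes.toBytes("q");
            Put put = new Put(row).addColumn(fam, qual, next);
            return table.checkAndPut(row, fam, qual, CompareOperator.EQUAL,
                    expected, put);
        }
    }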

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index f2edca4..bd0743c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -38,9 +38,10 @@ import javax.annotation.Nonnull;
 import javax.annotation.concurrent.Immutable;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -286,8 +287,8 @@ public class MutationState implements SQLCloseable {
     // be called by TableResultIterator in a multi-threaded manner. Since we do not want to expose
     // the Transaction outside of MutationState, this seems reasonable, as the member variables
     // would not change as these threads are running.
-    public HTableInterface getHTable(PTable table) throws SQLException {
-        HTableInterface htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
+    public Table getHTable(PTable table) throws SQLException {
+        Table htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
         if (table.isTransactional() && phoenixTransactionContext.isTransactionRunning()) {
             PhoenixTransactionalTable phoenixTransactionTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, htable, table);
             // Using cloned mutationState as we may have started a new transaction already
@@ -779,7 +780,7 @@ public class MutationState implements SQLCloseable {
     private class MetaDataAwareHTable extends DelegateHTable {
         private final TableRef tableRef;
         
-        private MetaDataAwareHTable(HTableInterface delegate, TableRef tableRef) {
+        private MetaDataAwareHTable(Table delegate, TableRef tableRef) {
             super(delegate);
             this.tableRef = tableRef;
         }
@@ -809,7 +810,7 @@ public class MutationState implements SQLCloseable {
                         ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                         for (PTable index : rowKeyIndexes) {
                             List<Delete> indexDeletes = IndexUtil.generateDeleteIndexData(table, index, deletes, ptr, connection.getKeyValueBuilder(), connection);
-                            HTableInterface hindex = connection.getQueryServices().getTable(index.getPhysicalName().getBytes());
+                            Table hindex = connection.getQueryServices().getTable(index.getPhysicalName().getBytes());
                             hindex.delete(indexDeletes);
                         }
                     }
@@ -976,7 +977,7 @@ public class MutationState implements SQLCloseable {
                     // region servers.
                     shouldRetry = cache!=null;
                     SQLException sqlE = null;
-                    HTableInterface hTable = connection.getQueryServices().getTable(htableName);
+                    Table hTable = connection.getQueryServices().getTable(htableName);
                     try {
                         if (table.isTransactional()) {
                             // Track tables to which we've sent uncommitted data
@@ -1000,7 +1001,8 @@ public class MutationState implements SQLCloseable {
                         child.addTimelineAnnotation("Attempt " + retryCount);
                         List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
                         for (List<Mutation> mutationBatch : mutationBatchList) {
-                            hTable.batch(mutationBatch);
+                            // TODO: collect the results of the batch and fail if any mutation raised an exception.
+                            hTable.batch(mutationBatch, null);
                             batchCount++;
                         }
                         if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
@@ -1027,7 +1029,7 @@ public class MutationState implements SQLCloseable {
                                 // If it fails again, we don't retry.
                                 String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
                                 logger.warn(LogUtil.addCustomAnnotations(msg, connection));
-                                connection.getQueryServices().clearTableRegionCache(htableName);
+                                connection.getQueryServices().clearTableRegionCache(TableName.valueOf(htableName));
 
                                 // add a new child span as this one failed
                                 child.addTimelineAnnotation(msg);
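
The TODO above notes that the results of batch() should be inspected. One way
to do that, sketched here under the assumption that a null or Throwable slot
in the results array marks a failed action (this helper is illustrative, not
the committed fix):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Table;

    public class BatchSketch {
        static void batchOrThrow(Table hTable, List<Mutation> mutationBatch)
                throws IOException {
            Object[] results = new Object[mutationBatch.size()];
            try {
                hTable.batch(mutationBatch, results);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
            for (int i = 0; i < results.length; i++) {
                // A null or Throwable slot means the corresponding action failed.
                if (results[i] == null || results[i] instanceof Throwable) {
                    throw new IOException("Mutation " + i + " failed: " + results[i]);
                }
            }
        }
    }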

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
index 45e271d..8426484 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
@@ -22,7 +22,7 @@ import java.util.concurrent.ExecutorService;
 
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 public class CoprocessorHTableFactory implements HTableFactory {
@@ -34,12 +34,12 @@ public class CoprocessorHTableFactory implements HTableFactory {
     }
 
     @Override
-    public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
+    public Table getTable(ImmutableBytesPtr tablename) throws IOException {
         return this.e.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
     }
     
     @Override
-    public HTableInterface getTable(ImmutableBytesPtr tablename,ExecutorService pool) throws IOException {
+    public Table getTable(ImmutableBytesPtr tablename,ExecutorService pool) throws IOException {
         return this.e.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
index e6a2e60..a73f403 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
@@ -21,14 +21,14 @@ package org.apache.phoenix.hbase.index.table;
 import java.io.IOException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 public interface HTableFactory {
 
-  public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException;
+  public Table getTable(ImmutableBytesPtr tablename) throws IOException;
 
   public void shutdown();
 
-  public HTableInterface getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException;
+  public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 3649069..16c26ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -26,9 +26,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.CoprocessorHConnection;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -95,7 +96,7 @@ public class IndexWriterUtils {
         IndexManagementUtil.setIfNotSet(conf, HTABLE_THREAD_KEY, htableThreads);
         if (env instanceof RegionCoprocessorEnvironment) {
             RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
             RegionServerServices services = e.getRegionServerServices();
             if (services instanceof HRegionServer) {
                 return new CoprocessorHConnectionTableFactory(conf, (HRegionServer) services);
             }
@@ -110,7 +111,7 @@ public class IndexWriterUtils {
      */
     private static class CoprocessorHConnectionTableFactory implements HTableFactory {
         @GuardedBy("CoprocessorHConnectionTableFactory.this")
-        private HConnection connection;
+        private Connection connection;
         private final Configuration conf;
         private final HRegionServer server;
 
@@ -119,7 +120,7 @@ public class IndexWriterUtils {
             this.server = server;
         }
 
-        private synchronized HConnection getConnection(Configuration conf) throws IOException {
+        private synchronized Connection getConnection(Configuration conf) throws IOException {
             if (connection == null || connection.isClosed()) {
                 connection = new CoprocessorHConnection(conf, server);
             }
@@ -127,8 +128,8 @@ public class IndexWriterUtils {
         }
 
         @Override
-        public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
-            return getConnection(conf).getTable(tablename.copyBytesIfNecessary());
+        public Table getTable(ImmutableBytesPtr tablename) throws IOException {
+            return getConnection(conf).getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
         }
 
         @Override
@@ -143,9 +144,9 @@ public class IndexWriterUtils {
         }
 
         @Override
-        public HTableInterface getTable(ImmutableBytesPtr tablename, ExecutorService pool)
+        public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool)
                 throws IOException {
-            return getConnection(conf).getTable(tablename.copyBytesIfNecessary(), pool);
+            return getConnection(conf).getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool);
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index e4e8343..dadaf75 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -22,8 +22,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure;
@@ -143,7 +143,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                     if (LOG.isTraceEnabled()) {
                         LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
                     }
-                    HTableInterface table = null;
+                    Table table = null;
                     try {
                         if (allowLocalUpdates
                                 && env != null

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 0449e9e..a60ced4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -24,8 +24,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.hbase.index.CapturingAbortable;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
@@ -149,7 +149,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                 @SuppressWarnings("deprecation")
                 @Override
                 public Boolean call() throws Exception {
-                    HTableInterface table = null;
+                    Table table = null;
                     try {
                         // this may have been queued, but there was an abort/stop so we try to early exit
                         throwFailureIfDone();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index cc2c6b3..c217d8e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -194,7 +194,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             timestamp = minTimeStamp;
 
             // If the data table has local index column families then get local indexes to disable.
-            if (ref.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())
+            if (ref.getTableName().equals(env.getRegion().getTableDesc().getTableName().getNameAsString())
                     && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
                 for (String tableName : getLocalIndexNames(ref, mutations)) {
                     indexTableNames.put(tableName, minTimeStamp);
@@ -225,7 +225,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
                 minTimeStamp *= -1;
             }
             // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
-            try (HTableInterface systemTable = env.getTable(SchemaUtil
+            try (Table systemTable = env.getTable(SchemaUtil
                     .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
                 MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
                         systemTable, newState);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index f3c1dbd..2dea53c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -344,7 +344,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
                 ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
                 scanRanges.initializeScan(scan);
                 TableName tableName = env.getRegion().getRegionInfo().getTable();
-                HTableInterface htable = env.getTable(tableName);
+                Table htable = env.getTable(tableName);
                 txTable = TransactionFactory.getTransactionFactory().getTransactionalTable(txnContext, htable);
                 // For rollback, we need to see all versions, including
                 // the last committed version as there may be multiple

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index f037a20..46fd55c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -949,7 +949,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                         } catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e2){
                             // Catch only to try to recover from region boundary cache being out of date
                             if (!clearedCache) { // Clear cache once so that we rejigger job based on new boundaries
-                                services.clearTableRegionCache(physicalTableName);
+                                services.clearTableRegionCache(TableName.valueOf(physicalTableName));
                                 context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
                             }
                             // Resubmit just this portion of work again

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 68592ef..c09b3c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -164,12 +164,12 @@ public class SnapshotScanner extends AbstractClientScanner {
       }
 
       @Override
-      public HTableInterface getTable(TableName tableName) throws IOException {
+      public Table getTable(TableName tableName) throws IOException {
         throw new UnsupportedOperationException();
       }
 
       @Override
-      public HTableInterface getTable(TableName tableName, ExecutorService executorService)
+      public Table getTable(TableName tableName, ExecutorService executorService)
           throws IOException {
         throw new UnsupportedOperationException();
       }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
index e812854..c9e7bfb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
@@ -37,8 +37,8 @@ import java.util.concurrent.locks.ReentrantLock;
 import javax.annotation.concurrent.GuardedBy;
 
 import org.apache.hadoop.hbase.client.AbstractClientScanner;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.cache.ServerCacheClient;
@@ -75,7 +75,7 @@ import com.google.common.annotations.VisibleForTesting;
  */
 public class TableResultIterator implements ResultIterator {
     private final Scan scan;
-    private final HTableInterface htable;
+    private final Table htable;
     private final ScanMetricsHolder scanMetricsHolder;
     private static final ResultIterator UNINITIALIZED_SCANNER = ResultIterator.EMPTY_ITERATOR;
     private final long renewLeaseThreshold;
@@ -188,7 +188,7 @@ public class TableResultIterator implements ResultIterator {
                                 newScan.setStartRow(ByteUtil.nextKey(startRowSuffix));
                             }
                         }
-                        plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getTableName());
+                        plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getName());
 						if (e1 instanceof HashJoinCacheNotFoundException) {
 							logger.debug(
 									"Retrying when Hash Join cache is not found on the server ,by sending the cache again");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index ec1b451..bf4e277 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
@@ -109,7 +110,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
             // Clear the table region boundary cache to make sure long running jobs stay up to date
             byte[] tableNameBytes = queryPlan.getTableRef().getTable().getPhysicalName().getBytes();
             ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices();
-            services.clearTableRegionCache(tableNameBytes);
+            services.clearTableRegionCache(TableName.valueOf(tableNameBytes));
 
             long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
             boolean isRequestMetricsEnabled = readMetrics.isRequestMetricsEnabled();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
index 59b26b2..11a8176 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
@@ -22,8 +22,11 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,7 +39,7 @@ public class DirectHTableWriter {
 
     private Configuration conf = null;
 
-    private HTable table;
+    private Table table;
 
     public DirectHTableWriter(Configuration otherConf) {
         setConf(otherConf);
@@ -51,8 +54,8 @@ public class DirectHTableWriter {
         }
 
         try {
-            this.table = new HTable(this.conf, tableName);
-            this.table.setAutoFlush(false, true);
+            Connection conn = ConnectionFactory.createConnection(this.conf);
+            this.table = conn.getTable(TableName.valueOf(tableName));
             LOG.info("Created table instance for " + tableName);
         } catch (IOException e) {
             LOG.error("IOException : ", e);
@@ -69,7 +72,7 @@ public class DirectHTableWriter {
         return conf;
     }
 
-    protected HTable getTable() {
+    protected Table getTable() {
         return table;
     }
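
Note that the rewritten setConf() above creates a Connection that goes out of
scope unclosed; retaining it on a field lets it be released alongside the
Table. A sketch of that shape (the class name is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ClosableTableWriter implements AutoCloseable {
        private final Connection conn; // retained so it can be closed with the table
        private final Table table;

        public ClosableTableWriter(Configuration conf, String tableName)
                throws IOException {
            this.conn = ConnectionFactory.createConnection(conf);
            this.table = conn.getTable(TableName.valueOf(tableName));
        }

        @Override
        public void close() throws IOException {
            table.close();
            conn.close();
        }
    }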
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 671e4cf..af080b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -46,11 +46,14 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
+import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
@@ -431,8 +434,9 @@ public class IndexTool extends Configured implements Tool {
             final Configuration configuration = job.getConfiguration();
             final String physicalIndexTable =
                     PhoenixConfigurationUtil.getPhysicalTableName(configuration);
-            final HTable htable = new HTable(configuration, physicalIndexTable);
-            HFileOutputFormat.configureIncrementalLoad(job, htable);
+            org.apache.hadoop.hbase.client.Connection conn = ConnectionFactory.createConnection(configuration);
+            TableName tablename = TableName.valueOf(physicalIndexTable);
+            HFileOutputFormat2.configureIncrementalLoad(job, conn.getTable(tablename),conn.getRegionLocator(tablename));
             return job;
                
         }
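
HFileOutputFormat2.configureIncrementalLoad() needs a RegionLocator in
addition to the Table. Since region boundaries are read up front when the job
is configured, the connection opened above could also be scoped with
try-with-resources; a hedged sketch (method and variable names are
illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
    import org.apache.hadoop.mapreduce.Job;

    public class BulkLoadConfigSketch {
        static void configureBulkLoad(Job job, Configuration conf,
                String physicalIndexTable) throws Exception {
            TableName name = TableName.valueOf(physicalIndexTable);
            // configureIncrementalLoad reads region boundaries immediately,
            // so the connection can be closed once the job is configured.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(name);
                 RegionLocator locator = conn.getRegionLocator(name)) {
                HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
            }
        }
    }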

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 45ab5fa..558df85 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -26,9 +26,10 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.MutationPlan;
@@ -66,7 +67,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
      * @return the HTableInterface
      * @throws SQLException 
      */
-    public HTableInterface getTable(byte[] tableName) throws SQLException;
+    public Table getTable(byte[] tableName) throws SQLException;
 
     public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException;
 
@@ -93,7 +94,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
     public int getLowestClusterHBaseVersion();
     public HBaseAdmin getAdmin() throws SQLException;
 
-    void clearTableRegionCache(byte[] tableName) throws SQLException;
+    void clearTableRegionCache(TableName name) throws SQLException;
 
     boolean hasIndexWALCodec();
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 248e4a3..1d9a521 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -106,13 +106,14 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -281,7 +282,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private final Object connectionCountLock = new Object();
     private final boolean returnSequenceValues ;
 
-    private HConnection connection;
+    private Connection connection;
     private ZKClientService txZKClientService;
     private volatile boolean initialized;
     private volatile int nSequenceSaltBuckets;
@@ -423,7 +424,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public HTableInterface getTable(byte[] tableName) throws SQLException {
+    public Table getTable(byte[] tableName) throws SQLException {
         try {
             return HBaseFactoryProvider.getHTableFactory().getTable(tableName, connection, null);
         } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
@@ -435,7 +436,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
     @Override
     public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
-        HTableInterface htable = getTable(tableName);
+        Table htable = getTable(tableName);
         try {
             return htable.getTableDescriptor();
         } catch (IOException e) {
@@ -544,8 +545,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public void clearTableRegionCache(byte[] tableName) throws SQLException {
-        connection.clearRegionCache(TableName.valueOf(tableName));
+    public void clearTableRegionCache(TableName tableName) throws SQLException {
+        ((ClusterConnection)connection).clearRegionCache(tableName);
     }
 
     @Override
@@ -564,7 +565,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 List<HRegionLocation> locations = Lists.newArrayList();
                 byte[] currentKey = HConstants.EMPTY_START_ROW;
                 do {
-                    HRegionLocation regionLocation = connection.getRegionLocation(
+                    HRegionLocation regionLocation = ((ClusterConnection)connection).getRegionLocation(
                             TableName.valueOf(tableName), currentKey, reload);
                     locations.add(regionLocation);
                     currentKey = regionLocation.getRegionInfo().getEndKey();
@@ -1170,7 +1171,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         boolean isIncompatible = false;
         int minHBaseVersion = Integer.MAX_VALUE;
         boolean isTableNamespaceMappingEnabled = false;
-        HTableInterface ht = null;
+        Table ht = null;
         try {
             List<HRegionLocation> locations = this
                     .getAllTableRegions(metaTable);
@@ -1271,7 +1272,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     connection.relocateRegion(SchemaUtil.getPhysicalName(tableName, this.getProps()), tableKey);
                 }
 
-                HTableInterface ht = this.getTable(SchemaUtil.getPhysicalName(tableName, this.getProps()).getName());
+                Table ht = this.getTable(SchemaUtil.getPhysicalName(tableName, this.getProps()).getName());
                 try {
                     final Map<byte[], MetaDataResponse> results =
                             ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);
@@ -1323,14 +1324,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         boolean wasDeleted = false;
         try (HBaseAdmin admin = getAdmin()) {
             try {
-                HTableDescriptor desc = admin.getTableDescriptor(physicalIndexName);
+                TableName physicalIndexTableName = TableName.valueOf(physicalIndexName);
+                HTableDescriptor desc = admin.getTableDescriptor(physicalIndexTableName);
                 if (Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES)))) {
                     final ReadOnlyProps props = this.getProps();
                     final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                     if (dropMetadata) {
-                        admin.disableTable(physicalIndexName);
-                        admin.deleteTable(physicalIndexName);
-                        clearTableRegionCache(physicalIndexName);
+                        admin.disableTable(physicalIndexTableName);
+                        admin.deleteTable(physicalIndexTableName);
+                        clearTableRegionCache(physicalIndexTableName);
                         wasDeleted = true;
                     } else {
                         this.tableStatsCache.invalidateAll(desc);
@@ -1366,7 +1368,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     for(String cf: columnFamiles) {
                         admin.deleteColumn(physicalTableName, cf);
                     }
-                    clearTableRegionCache(physicalTableName);
+                    clearTableRegionCache(TableName.valueOf(physicalTableName));
                     wasDeleted = true;
                 }
             } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) {
@@ -1626,7 +1628,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         admin.disableTable(tableName);
                         admin.deleteTable(tableName);
                         tableStatsCache.invalidateAll(htableDesc);
-                        clearTableRegionCache(tableName);
+                        clearTableRegionCache(TableName.valueOf(tableName));
                     } catch (TableNotFoundException ignore) {
                     }
                 }
@@ -2513,7 +2515,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after some time
             tableDesc.addFamily(columnDesc);
             admin.createTable(tableDesc);
-            try (HTableInterface sysMutexTable = getTable(mutexTableName.getName())) {
+            try (Table sysMutexTable = getTable(mutexTableName.getName())) {
                 byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
                         PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
                 Put put = new Put(mutexRowKey);
@@ -2845,7 +2847,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA_BYTES,
                                 PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE_BYTES,
                                 MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
-                        clearTableRegionCache(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
+                        clearTableRegionCache(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
                     }
                     nSequenceSaltBuckets = nSaltBuckets;
                 } else {
@@ -3113,7 +3115,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
                 PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
 
-        HTableInterface metatable = null;
+        Table metatable = null;
         try (HBaseAdmin admin = getAdmin()) {
              // SYSTEM namespace needs to be created via HBase API's because "CREATE SCHEMA" statement tries to write its metadata
              // in SYSTEM:CATALOG table. Without SYSTEM namespace, SYSTEM:CATALOG table cannot be created.
@@ -3207,7 +3209,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     getVersion(MIN_SYSTEM_TABLE_TIMESTAMP));
         }
 
-        try (HTableInterface sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
+        try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
             byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
             byte[] qualifier = UPGRADE_MUTEX;
             byte[] oldValue = UPGRADE_MUTEX_UNLOCKED;
@@ -3243,7 +3245,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             return true;
         }
 
-        try (HTableInterface sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
+        try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
             byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
             byte[] qualifier = UPGRADE_MUTEX;
             byte[] expectedValue = UPGRADE_MUTEX_LOCKED;
@@ -3457,7 +3459,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             latestMetaData = newEmptyMetaData();
         }
         tableStatsCache.invalidateAll();
-        try (HTableInterface htable =
+        try (Table htable =
                 this.getTable(
                     SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
                         this.getProps()).getName())) {
@@ -3561,9 +3563,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             sequence.getLock().lock();
             // Now that we have the lock we need, create the sequence
             Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle);
-            HTableInterface htable = this.getTable(SchemaUtil
+            Table htable = this.getTable(SchemaUtil
                     .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
-            htable.setAutoFlush(true);
             try {
                 Result result = htable.append(append);
                 return sequence.createSequence(result, minValue, maxValue, cycle);
@@ -3589,7 +3590,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             sequence.getLock().lock();
             // Now that we have the lock we need, create the sequence
             Append append = sequence.dropSequence(timestamp);
-            HTableInterface htable = this.getTable(SchemaUtil
+            Table htable = this.getTable(SchemaUtil
                     .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
             try {
                 Result result = htable.append(append);
@@ -3686,11 +3687,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if (toIncrementList.isEmpty()) {
                 return;
             }
-            HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES,this.getProps()).getName());
+            Table hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES,this.getProps()).getName());
             Object[] resultObjects = null;
             SQLException sqlE = null;
             try {
-                resultObjects= hTable.batch(incrementBatch);
+                resultObjects = new Object[incrementBatch.size()];
+                hTable.batch(incrementBatch, resultObjects);
             } catch (IOException e) {
                 sqlE = ServerUtil.parseServerException(e);
             } catch (InterruptedException e) {
@@ -3735,7 +3736,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         // clear the meta data cache for the table here
         try {
             SQLException sqlE = null;
-            HTableInterface htable = this.getTable(SchemaUtil
+            Table htable = this.getTable(SchemaUtil
                     .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
 
             try {
@@ -3810,12 +3811,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if (toReturnList.isEmpty()) {
                 return;
             }
-            HTableInterface hTable = this.getTable(SchemaUtil
+            Table hTable = this.getTable(SchemaUtil
                     .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
             Object[] resultObjects = null;
             SQLException sqlE = null;
             try {
-                resultObjects= hTable.batch(mutations);
+                resultObjects = new Object[mutations.size()];
+                hTable.batch(mutations, resultObjects);
             } catch (IOException e){
                 sqlE = ServerUtil.parseServerException(e);
             } catch (InterruptedException e){
@@ -3864,11 +3865,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         if (mutations.isEmpty()) {
             return;
         }
-        HTableInterface hTable = this.getTable(
+        Table hTable = this.getTable(
                 SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
         SQLException sqlE = null;
         try {
-            hTable.batch(mutations);
+            hTable.batch(mutations, null);
         } catch (IOException e) {
             sqlE = ServerUtil.parseServerException(e);
         } catch (InterruptedException e) {

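Reviewer note on the batch() changes above: the one-argument batch(List) no longer exists on Table, and the two-argument batch(List, Object[]) returns void, filling a caller-supplied results array. The dropped htable.setAutoFlush(true) call is obsolete for the same reason the buffering methods went away: Table writes are always flushed. A minimal sketch of the migration pattern, assuming a Table obtained elsewhere (the helper name is illustrative, not part of this patch):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;

    public class BatchMigrationSketch {
        // Old:  Object[] results = table.batch(actions);
        // New:  the caller allocates the array and batch() fills it in.
        static Object[] runBatch(Table table, List<? extends Row> actions)
                throws IOException, InterruptedException {
            Object[] results = new Object[actions.size()];
            table.batch(actions, results);
            return results;
        }
    }

Passing null for the results array is only appropriate when the per-operation results are never inspected afterwards, as in the return-all-sequences path above.
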
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index f15e0b1..410bb71 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -149,7 +149,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public HTableInterface getTable(byte[] tableName) throws SQLException {
+    public Table getTable(byte[] tableName) throws SQLException {
         throw new UnsupportedOperationException();
     }
 
@@ -388,7 +388,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public void clearTableRegionCache(byte[] tableName) throws SQLException {
+    public void clearTableRegionCache(TableName tableName) throws SQLException {
     }
 
     @Override

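Since clearTableRegionCache(byte[]) became clearTableRegionCache(TableName), call sites still holding raw bytes wrap them with TableName.valueOf(). A hedged sketch of the adapter pattern (names illustrative):

    import java.sql.SQLException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.phoenix.query.ConnectionQueryServices;

    class RegionCacheSketch {
        // Bridges old byte[]-based callers to the new TableName signature.
        static void clearCache(ConnectionQueryServices services, byte[] physicalName)
                throws SQLException {
            services.clearTableRegionCache(TableName.valueOf(physicalName));
        }
    }
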
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 6c464eb..e57dadd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -26,9 +26,10 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.MutationPlan;
@@ -66,7 +67,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public HTableInterface getTable(byte[] tableName) throws SQLException {
+    public Table getTable(byte[] tableName) throws SQLException {
         return getDelegate().getTable(tableName);
     }
 
@@ -162,7 +163,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public void clearTableRegionCache(byte[] tableName) throws SQLException {
+    public void clearTableRegionCache(TableName tableName) throws SQLException {
         getDelegate().clearTableRegionCache(tableName);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d27be1b..d695f41 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.PColumnFamily;
@@ -90,7 +90,7 @@ public class GuidePostsCache {
         @Override
         public GuidePostsInfo load(GuidePostsKey statsKey) throws Exception {
             @SuppressWarnings("deprecation")
-            HTableInterface statsHTable = queryServices.getTable(SchemaUtil.getPhysicalName(
+            Table statsHTable = queryServices.getTable(SchemaUtil.getPhysicalName(
                     PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
                             queryServices.getProps()).getName());
             try {

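Table extends Closeable just as HTableInterface did, so the loader keeps its acquire/read/close shape; only the declared type changes. Roughly (a sketch, not the exact patch code; imports as in the file above, and the timestamp argument is purely illustrative):

    GuidePostsInfo loadStats(ConnectionQueryServices queryServices, GuidePostsKey statsKey)
            throws IOException, SQLException {
        Table statsHTable = queryServices.getTable(SchemaUtil.getPhysicalName(
                PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
                queryServices.getProps()).getName());
        try {
            return StatisticsUtil.readStatistics(statsHTable, statsKey,
                    HConstants.LATEST_TIMESTAMP);
        } finally {
            statsHTable.close();
        }
    }
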
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
index 72f4182..0912c2c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
@@ -20,8 +20,8 @@ package org.apache.phoenix.query;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 
 /**
- * Factory for creating {@link HConnection}
+ * Factory for creating {@link Connection}
@@ -36,15 +36,15 @@ public interface HConnectionFactory {
      * @param configuration object
-     * @return A HConnection instance
+     * @return a Connection instance
      */
-    HConnection createConnection(Configuration conf) throws IOException;
+    Connection createConnection(Configuration conf) throws IOException;
 
     /**
-     * Default implementation.  Uses standard HBase HConnections.
+     * Default implementation.  Uses standard HBase Connections.
      */
     static class HConnectionFactoryImpl implements HConnectionFactory {
         @Override
-        public HConnection createConnection(Configuration conf) throws IOException {
-            return HConnectionManager.createConnection(conf);
+        public Connection createConnection(Configuration conf) throws IOException {
+            return ConnectionFactory.createConnection(conf);
         }
     }
 }

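The rename here is one-for-one, but the lifecycle expectations deserve a restatement: a Connection is heavyweight and meant to be created once, shared, and closed by its owner, while Table handles are cheap and fetched per use. A minimal usage sketch (table name illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Heavyweight: create once, share, close on shutdown.
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 // Lightweight: fetch per use and close promptly.
                 Table table = connection.getTable(TableName.valueOf("SYSTEM.CATALOG"))) {
                // ... issue Gets/Puts/Scans against table ...
            }
        }
    }
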
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
index 09dbff6..10a531f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
@@ -20,8 +20,9 @@ package org.apache.phoenix.query;
 import java.io.IOException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Table;
 
 /**
  * Creates clients to access HBase tables.
@@ -39,16 +40,16 @@ public interface HTableFactory {
      * @return An client to access an HBase table.
      * @throws IOException if a server or network exception occurs
      */
-    HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool) throws IOException;
+    Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException;
 
     /**
      * Default implementation.  Uses standard HBase HTables.
      */
     static class HTableFactoryImpl implements HTableFactory {
         @Override
-        public HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool) throws IOException {
+        public Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException {
             // Let the HBase client manage the thread pool instead of passing ours through
-            return connection.getTable(tableName);
+            return connection.getTable(TableName.valueOf(tableName));
         }
     }
 }

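Connection.getTable() is keyed by TableName rather than byte[], hence the valueOf() wrapping. The comment about the thread pool still holds: the single-argument overload lets the HBase client manage its own executor; getTable(TableName, ExecutorService) would pass ours through instead. Sketch of both options:

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    class TableFactorySketch {
        // Client-managed pool (what the factory above does):
        static Table get(Connection connection, byte[] name) throws IOException {
            return connection.getTable(TableName.valueOf(name));
        }

        // Caller-supplied pool, if we ever wanted to pass ours through:
        static Table get(Connection connection, byte[] name, ExecutorService pool)
                throws IOException {
            return connection.getTable(TableName.valueOf(name), pool);
        }
    }
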
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index daf7c70..8f36fd6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -30,10 +30,10 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -124,7 +124,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
                 env.getRegion().getTableDesc());
         } else {
             long guidepostWidth = -1;
-            HTableInterface htable = null;
+            Table htable = null;
             try {
                 // Next check for GUIDE_POST_WIDTH on table
                 htable = env.getTable(

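The null-then-close idiom around the coprocessor-acquired table carries over unchanged; only the declared type moves to Table. A sketch of the shape (illustrative of the GUIDE_POST_WIDTH lookup, not the exact patch code; imports as in the file above):

    void readGuidePostWidth(RegionCoprocessorEnvironment env) throws IOException {
        Table htable = null;
        try {
            htable = env.getTable(SchemaUtil.getPhysicalTableName(
                    PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
                    env.getConfiguration()));
            // ... issue a Get against htable for the GUIDE_POST_WIDTH cell ...
        } finally {
            if (htable != null) {
                htable.close();
            }
        }
    }
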
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index 0b9c409..71b01ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -128,7 +128,7 @@ public class StatisticsUtil {
         return key;
     }
 
-    public static GuidePostsInfo readStatistics(HTableInterface statsHTable, GuidePostsKey key, long clientTimeStamp)
+    public static GuidePostsInfo readStatistics(Table statsHTable, GuidePostsKey key, long clientTimeStamp)
             throws IOException {
         ImmutableBytesWritable ptr = new ImmutableBytesWritable();
         ptr.set(key.getColumnFamily());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index dfca30e..04ad575 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -32,12 +32,12 @@ import java.util.List;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -80,24 +80,24 @@ public class StatisticsWriter implements Closeable {
         if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
             clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
         }
-        HTableInterface statsWriterTable = env.getTable(
+        Table statsWriterTable = env.getTable(
                 SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, env.getConfiguration()));
-        HTableInterface statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable);
+        Table statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable);
         StatisticsWriter statsTable = new StatisticsWriter(statsReaderTable, statsWriterTable, tableName,
                 clientTimeStamp, guidePostDepth);
         return statsTable;
     }
 
-    private final HTableInterface statsWriterTable;
+    private final Table statsWriterTable;
     // In HBase 0.98.4 or above, the reader and writer will be the same.
     // In pre HBase 0.98.4, there was a bug in using the HTable returned
     // from a coprocessor for scans, so in that case it'll be different.
-    private final HTableInterface statsReaderTable;
+    private final Table statsReaderTable;
     private final byte[] tableName;
     private final long clientTimeStamp;
     private final long guidePostDepth;
     
-    private StatisticsWriter(HTableInterface statsReaderTable, HTableInterface statsWriterTable, String tableName,
+    private StatisticsWriter(Table statsReaderTable, Table statsWriterTable, String tableName,
             long clientTimeStamp, long guidePostDepth) {
         this.statsReaderTable = statsReaderTable;
         this.statsWriterTable = statsWriterTable;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 0957e56..9798f79 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -48,7 +48,7 @@ import com.google.protobuf.ServiceException;
 
 public class OmidTransactionTable implements PhoenixTransactionalTable {
 
-    public OmidTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
+    public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable) {
         // TODO Auto-generated constructor stub
     }
 
@@ -162,36 +162,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
     }
 
     @Override
-    public long incrementColumnValue(byte[] row, byte[] family,
-            byte[] qualifier, long amount, boolean writeToWAL)
-            throws IOException {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public Boolean[] exists(List<Get> gets) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
-    public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
-        // TODO Auto-generated method stub
-    }
-
-    @Override
-    public void setAutoFlushTo(boolean autoFlush) {
-        // TODO Auto-generated method stub
-    }
-
-    @Override
-    public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
-        // TODO Auto-generated method stub
-        return null;
-    }
-
-    @Override
     public TableName getName() {
         // TODO Auto-generated method stub
         return null;

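The overrides deleted above exist only on HTableInterface, not on Table. Where callers need equivalents, the Table-era forms are roughly as follows (a hedged sketch, not part of this patch; the table name is illustrative):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    class TableReplacementsSketch {
        static void examples(Connection connection, Table table, List<Get> gets,
                byte[] row, byte[] family, byte[] qualifier, byte[] value)
                throws IOException {
            // Boolean[] exists(List<Get>) -> primitive-array variant on Table:
            boolean[] found = table.existsAll(gets);

            // incrementColumnValue(..., boolean writeToWAL) -> explicit Durability:
            long newValue = table.incrementColumnValue(row, family, qualifier, 1L,
                    Durability.SKIP_WAL);

            // setAutoFlush()/setAutoFlushTo() are gone: Table writes are unbuffered,
            // and client-side buffering goes through a BufferedMutator instead:
            try (BufferedMutator mutator =
                    connection.getBufferedMutator(TableName.valueOf("ILLUSTRATIVE"))) {
                mutator.mutate(new Put(row).addColumn(family, qualifier, value));
            }

            // getRowOrBefore() has no direct Table replacement; a reverse scan
            // bounded at the row is the usual substitute.
        }
    }
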
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index 7af1c08..1293a21 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -19,36 +19,36 @@ package org.apache.phoenix.transaction;
 
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HTableDescriptor;
 
 import java.io.IOException;
 import java.util.List;
 
-public interface PhoenixTransactionalTable extends HTableInterface {
+public interface PhoenixTransactionalTable extends Table {
 
     /**
-     * Transaction version of {@link HTableInterface#get(Get get)}
+     * Transactional version of {@link Table#get(Get get)}
      * @param get
      * @throws IOException
      */
     public Result get(Get get) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#put(Put put)}
+     * Transactional version of {@link Table#put(Put put)}
      * @param put
      * @throws IOException
      */
     public void put(Put put) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#delete(Delete delete)}
+     * Transactional version of {@link Table#delete(Delete delete)}
      *
      * @param delete
      * @throws IOException
@@ -56,7 +56,7 @@ public interface PhoenixTransactionalTable extends HTableInterface {
     public void delete(Delete delete) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#getScanner(Scan scan)}
+     * Transactional version of {@link Table#getScanner(Scan scan)}
      *
      * @param scan
      * @return ResultScanner
@@ -87,31 +87,31 @@ public interface PhoenixTransactionalTable extends HTableInterface {
     public boolean exists(Get get) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#get(List gets)}
+     * Transactional version of {@link Table#get(List gets)}
      * @throws IOException
      */
     public Result[] get(List<Get> gets) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#getScanner(byte[] family)}
+     * Transactional version of {@link Table#getScanner(byte[] family)}
      * @throws IOException
      */
     public ResultScanner getScanner(byte[] family) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#getScanner(byte[] family, byte[] qualifier)}
+     * Transactional version of {@link Table#getScanner(byte[] family, byte[] qualifier)}
      * @throws IOException
      */
     public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#put(List puts)}
+     * Transactional version of {@link Table#put(List puts)}
      * @throws IOException
      */
     public void put(List<Put> puts) throws IOException;
 
     /**
-     * Transactional version of {@link HTableInterface#delete(List deletes)}
+     * Transactional version of {@link Table#delete(List deletes)}
      * @throws IOException
      */
     public void delete(List<Delete> deletes) throws IOException;