Posted to commits@phoenix.apache.org by an...@apache.org on 2017/11/27 13:03:14 UTC

[6/8] phoenix git commit: PHOENIX-4403 Workaround Tephra issues and fix all left over compilation issues in phoenix-core

PHOENIX-4403 Workaround Tephra issues and fix all left over compilation issues in phoenix-core

- Update the HBase dependency to 2.0.0-beta-1-SNAPSHOT
- Update classes to implement the new APIs introduced in HBase beta-1
- Remove the unused CoprocessorHTableFactory
- Use the environment's createConnection() API to initiate a short-circuit connection with a custom configuration (see the sketch after this list)
- Change usage of ZooKeeperWatcher to ZKWatcher
- Update PhoenixTransactionalTable, TephraTransactionTable, and OmidTransactionTable with the interface methods of Table
- Work around Tephra dependencies by copying TransactionAwareHTable into the Phoenix workspace and porting it to HBase 2.0, and by creating a BaseRegionObserver shim for TransactionContext.
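
For illustration, a minimal sketch of the short-circuit connection change (the helper
class and method below are hypothetical; only the createConnection() API on
RegionCoprocessorEnvironment is taken from this patch):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class ShortCircuitExample {
        // In HBase 1.x this required new CoprocessorHConnection(conf, server) and a
        // downcast of RegionServerServices to HRegionServer; in HBase 2.0 the
        // coprocessor environment creates the connection itself.
        static Table openTable(RegionCoprocessorEnvironment env, Configuration conf,
                String tableName) throws IOException {
            Connection conn = env.createConnection(conf);
            return conn.getTable(TableName.valueOf(tableName)); // table name supplied by caller
        }
    }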


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/488b5281
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/488b5281
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/488b5281

Branch: refs/heads/5.x-HBase-2.0
Commit: 488b5281f23604252c6b9731fab3e031c24ce742
Parents: c3ec80d
Author: Ankit Singhal <an...@gmail.com>
Authored: Mon Nov 27 15:46:00 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Mon Nov 27 15:46:00 2017 +0530

----------------------------------------------------------------------
 .../hbase/coprocessor/BaseRegionObserver.java   |  22 +
 .../DelegateRegionCoprocessorEnvironment.java   |  12 +
 .../apache/phoenix/execute/MutationState.java   |   4 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |   3 +-
 .../index/table/CoprocessorHTableFactory.java   |  50 --
 .../hbase/index/write/IndexWriterUtils.java     |  21 +-
 .../phoenix/index/PhoenixIndexMetaData.java     |   2 +-
 .../apache/phoenix/iterate/SnapshotScanner.java |  11 +
 .../index/automation/PhoenixMRJobSubmitter.java |   8 +-
 .../transaction/OmidTransactionTable.java       |  32 +-
 .../transaction/PhoenixTransactionalTable.java  | 770 +++++++++++++++++--
 .../transaction/TephraTransactionTable.java     |  40 +-
 .../transaction/TransactionAwareHTable.java     | 680 ++++++++++++++++
 .../phoenix/util/ZKBasedMasterElectionUtil.java |   4 +-
 pom.xml                                         |   4 +-
 15 files changed, 1459 insertions(+), 204 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
new file mode 100644
index 0000000..fa206bb
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+public class BaseRegionObserver implements RegionObserver {
+
+}
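
In HBase 2.0, RegionObserver became an interface with default methods and the old
BaseRegionObserver base class went away, so the empty shim above restores the
extension point that Tephra's coprocessor code compiles against. A hedged sketch of
a subclass (the observer name and the overridden hook are illustrative):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    // Pre-2.0 observers extended BaseRegionObserver and overrode only the hooks
    // they needed; the shim preserves that pattern on top of the new interface.
    public class ExampleTxObserver extends BaseRegionObserver {
        @Override
        public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
                Get get, List<Cell> results) throws IOException {
            // e.g. narrow the Get's time range for transactional visibility
        }
    }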

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index 00f3316..4e6bb2e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -17,9 +17,11 @@
  */
 package org.apache.phoenix.coprocessor;
 
+import java.io.IOException;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -113,6 +115,16 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn
     public MetricRegistry getMetricRegistryForRegionServer() {
         return delegate.getMetricRegistryForRegionServer();
     }
+
+    @Override
+    public Connection createConnection(Configuration conf) throws IOException {
+        return delegate.createConnection(conf);
+    }
+
+    @Override
+    public ExtendedCellBuilder getCellBuilder() {
+        return delegate.getCellBuilder();
+    }
    
     
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index eab64f1..e9547f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -90,8 +90,8 @@ import org.apache.phoenix.transaction.PhoenixTransactionalTable;
 import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
@@ -195,7 +195,7 @@ public class MutationState implements SQLCloseable {
             this.mutations.put(table, mutations);
         }
         this.numRows = mutations.size();
-        this.estimatedSize = KeyValueUtil.getEstimatedRowSize(table, mutations);
+        this.estimatedSize = PhoenixKeyValueUtil.getEstimatedRowSize(table, mutations);
         throwIfTooBig();
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index cb8accf..f9b882c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Delete;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -209,7 +209,6 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
           String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration());
           if (errormsg != null) {
             IOException ioe = new IOException(errormsg);
-            env.getRegionServerServices().abort(errormsg, ioe);
             throw ioe;
           }
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
deleted file mode 100644
index 7ca43ea..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.table;
-
-import java.io.IOException;
-import java.util.concurrent.ExecutorService;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-
-public class CoprocessorHTableFactory implements HTableFactory {
-
-    private RegionCoprocessorEnvironment e;
-
-    public CoprocessorHTableFactory(RegionCoprocessorEnvironment e) {
-        this.e = e;
-    }
-
-    @Override
-    public Table getTable(ImmutableBytesPtr tablename) throws IOException {
-        return this.e.getConnection().getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
-    }
-    
-    @Override
-    public Table getTable(ImmutableBytesPtr tablename,ExecutorService pool) throws IOException {
-        return this.e.getConnection().getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool);
-    }
-
-    @Override
-    public void shutdown() {
-        // noop
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 0ef7e18..bfdcbd8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -27,12 +27,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.CoprocessorHConnection;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.phoenix.hbase.index.table.CoprocessorHTableFactory;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
@@ -93,14 +89,7 @@ public class IndexWriterUtils {
                     IndexWriterUtils.DEFAULT_NUM_PER_TABLE_THREADS);
         LOG.trace("Creating HTableFactory with " + htableThreads + " threads for each HTable.");
         IndexManagementUtil.setIfNotSet(conf, HTABLE_THREAD_KEY, htableThreads);
-        if (env instanceof RegionCoprocessorEnvironment) {
-            RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
-            RegionServerServices services =e.getRegionServerServices();
-            if (services instanceof HRegionServer) {
-                return new CoprocessorHConnectionTableFactory(conf, (HRegionServer) services);
-            }
-        }
-        return new CoprocessorHTableFactory(env);
+        return new CoprocessorHConnectionTableFactory(conf, env);
     }
 
     /**
@@ -112,16 +101,16 @@ public class IndexWriterUtils {
         @GuardedBy("CoprocessorHConnectionTableFactory.this")
         private Connection connection;
         private final Configuration conf;
-        private final HRegionServer server;
+        private RegionCoprocessorEnvironment env;
 
-        CoprocessorHConnectionTableFactory(Configuration conf, HRegionServer server) {
+        CoprocessorHConnectionTableFactory(Configuration conf, RegionCoprocessorEnvironment env) {
             this.conf = conf;
-            this.server = server;
+            this.env = env;
         }
 
         private synchronized Connection getConnection(Configuration conf) throws IOException {
             if (connection == null || connection.isClosed()) {
-                connection = new CoprocessorHConnection(conf, server);
+                connection = env.createConnection(conf);
             }
             return connection;
         }
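
With this change the factory lazily caches a single environment-created connection
and serves all index table handles from it. A hedged sketch of the matching
getTable() path (the body is illustrative; copyBytesIfNecessary() comes from the
deleted CoprocessorHTableFactory above):

    // Table lookups now flow through the cached connection obtained from
    // env.createConnection(conf) instead of a CoprocessorHConnection bound to an
    // HRegionServer instance.
    public Table getTable(ImmutableBytesPtr tablename) throws IOException {
        return getConnection(conf).getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
    }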

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
index 05371a6..14c66b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
@@ -83,7 +83,7 @@ public class PhoenixIndexMetaData implements IndexMetaData {
             IndexMetaDataCache indexCache = (IndexMetaDataCache)cache.getServerCache(new ImmutableBytesPtr(uuid));
             if (indexCache == null) {
                 String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion() + " host="
-                        + env.getRegionServerServices().getServerName();
+                        + env.getServerName().getServerName();
                 SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND).setMessage(msg)
                         .build().buildException();
                 ServerUtil.throwIOException("Index update failed", e); // will not return

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 35f40ac..cfb3149 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AbstractClientScanner;
 import org.apache.hadoop.hbase.client.Connection;
@@ -192,6 +193,16 @@ public class SnapshotScanner extends AbstractClientScanner {
     public MetricRegistry getMetricRegistryForRegionServer() {
         throw new UnsupportedOperationException();
     }
+
+    @Override
+    public Connection createConnection(Configuration conf) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ExtendedCellBuilder getCellBuilder() {
+        throw new UnsupportedOperationException();
+    }
     };
   }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 9c447e8..662f0c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.mapreduce.index.IndexTool;
@@ -53,8 +53,8 @@ import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.util.PhoenixMRJobUtil;
-import org.apache.phoenix.util.UpgradeUtil;
 import org.apache.phoenix.util.PhoenixMRJobUtil.MR_SCHEDULER_TYPE;
+import org.apache.phoenix.util.UpgradeUtil;
 import org.apache.phoenix.util.ZKBasedMasterElectionUtil;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONObject;
@@ -182,8 +182,8 @@ public class PhoenixMRJobSubmitter {
 
     public int scheduleIndexBuilds() throws Exception {
 
-        ZooKeeperWatcher zookeeperWatcher =
-                new ZooKeeperWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
+        ZKWatcher zookeeperWatcher =
+                new ZKWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
 
         if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
             AUTO_INDEX_BUILD_LOCK_NAME)) {
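
The ZooKeeperWatcher-to-ZKWatcher rename is mechanical and the constructor keeps
the same three arguments. A minimal usage sketch based on the code above (the
try/finally cleanup is an assumption, not part of this patch):

    ZKWatcher watcher = new ZKWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
    try {
        if (!ZKBasedMasterElectionUtil.acquireLock(watcher, PHOENIX_LOCKS_PARENT,
                AUTO_INDEX_BUILD_LOCK_NAME)) {
            return 0; // another submitter already holds the lock; skip this round
        }
        // ... discover candidate indexes and submit MR index build jobs ...
    } finally {
        watcher.close();
    }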

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 047ccf6..78d7e4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -79,11 +79,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
         return null;
     }
 
-    @Override
-    public byte[] getTableName() {
-        // TODO Auto-generated method stub
-        return null;
-    }
 
     @Override
     public Configuration getConfiguration() {
@@ -132,32 +127,7 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
         // TODO Auto-generated method stub
     }
 
-    @Override
-    public void setAutoFlush(boolean autoFlush) {
-        // TODO Auto-generated method stub
-    }
-
-    @Override
-    public boolean isAutoFlush() {
-        // TODO Auto-generated method stub
-        return false;
-    }
-
-    @Override
-    public long getWriteBufferSize() {
-        // TODO Auto-generated method stub
-        return 0;
-    }
-
-    @Override
-    public void setWriteBufferSize(long writeBufferSize) throws IOException {
-        // TODO Auto-generated method stub
-    }
-
-    @Override
-    public void flushCommits() throws IOException {
-        // TODO Auto-generated method stub
-    }
+    
 
     @Override
     public void close() throws IOException {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index aced376..deceac6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -17,132 +17,782 @@
  */
 package org.apache.phoenix.transaction;
 
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 
-import java.io.IOException;
-import java.util.List;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
 
 public interface PhoenixTransactionalTable extends Table {
+    /**
+     * Gets the fully qualified table name instance of this table.
+     */
+    TableName getName();
 
     /**
-     * Transaction version of {@link Table#get(Get get)}
-     * @param get
-     * @throws IOException
+     * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
+     * <p>
+     * The reference returned is not a copy, so any change made to it will
+     * affect this instance.
      */
-    public Result get(Get get) throws IOException;
+    Configuration getConfiguration();
 
     /**
-     * Transactional version of {@link Table#put(Put put)}
-     * @param put
-     * @throws IOException
+     * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
+     * @throws java.io.IOException if a remote or network exception occurs.
+     * @deprecated since 2.0 version and will be removed in 3.0 version.
+     *             use {@link #getDescriptor()}
+     */
+    @Deprecated
+    HTableDescriptor getTableDescriptor() throws IOException;
+
+    /**
+     * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
+     * @throws java.io.IOException if a remote or network exception occurs.
      */
-    public void put(Put put) throws IOException;
+    TableDescriptor getDescriptor() throws IOException;
 
     /**
-     * Transactional version of {@link Table#delete(Delete delete)}
+     * Test for the existence of columns in the table, as specified by the Get.
+     * <p>
      *
-     * @param delete
-     * @throws IOException
+     * This will return true if the Get matches one or more keys, false if not.
+     * <p>
+     *
+     * This is a server-side call so it prevents any data from being transferred to
+     * the client.
+     *
+     * @param get the Get
+     * @return true if the specified Get matches one or more keys, false if not
+     * @throws IOException e
      */
-    public void delete(Delete delete) throws IOException;
+    boolean exists(Get get) throws IOException;
 
     /**
-     * Transactional version of {@link Table#getScanner(Scan scan)}
+     * Test for the existence of columns in the table, as specified by the Gets.
+     * <p>
+     *
+     * This will return an array of booleans. Each value will be true if the related Get matches
+     * one or more keys, false if not.
+     * <p>
+     *
+     * This is a server-side call so it prevents any data from being transferred to
+     * the client.
      *
-     * @param scan
-     * @return ResultScanner
+     * @param gets the Gets
+     * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
+     * @throws IOException e
+     */
+    boolean[] exists(List<Get> gets) throws IOException;
+
+    /**
+     * Test for the existence of columns in the table, as specified by the Gets.
+     * This will return an array of booleans. Each value will be true if the related Get matches
+     * one or more keys, false if not.
+     * This is a server-side call so it prevents any data from being transferred to
+     * the client.
+     *
+     * @param gets the Gets
+     * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
+     * @throws IOException e
+     * @deprecated since 2.0 version and will be removed in 3.0 version.
+     *             use {@link #exists(List)}
+     */
+    @Deprecated
+    default boolean[] existsAll(List<Get> gets) throws IOException {
+      return exists(gets);
+    }
+
+    /**
+     * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
+     * The ordering of execution of the actions is not defined: if you do a Put and a
+     * Get in the same {@link #batch} call, you will not necessarily be
+     * guaranteed that the Get returns what the Put had put.
+     *
+     * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
+     * @param results Empty Object[], same size as actions. Provides access to partial
+     *                results, in case an exception is thrown. A null in the result array means that
+     *                the call for that action failed, even after retries. The order of the objects
+     *                in the results array corresponds to the order of actions in the request list.
      * @throws IOException
+     * @since 0.90.0
      */
-    public ResultScanner getScanner(Scan scan) throws IOException;
+    void batch(final List<? extends Row> actions, final Object[] results) throws IOException,
+      InterruptedException;
 
     /**
-     * Returns Htable name
+     * Same as {@link #batch(List, Object[])}, but with a callback.
+     * @since 0.96.0
      */
-    public byte[] getTableName();
+    <R> void batchCallback(
+      final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
+    ) throws IOException, InterruptedException;
 
     /**
-     * Returns Htable configuration object
+     * Extracts certain cells from a given row.
+     * @param get The object that specifies what data to fetch and from which row.
+     * @return The data coming from the specified row, if it exists.  If the row
+     * specified doesn't exist, the {@link Result} instance returned won't
+     * contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public Configuration getConfiguration();
+    Result get(Get get) throws IOException;
 
     /**
-     * Returns HTableDescriptor of Htable
-     * @throws IOException
+     * Extracts specified cells from the given rows, as a batch.
+     *
+     * @param gets The objects that specify what data to fetch and from which rows.
+     * @return The data coming from the specified rows, if it exists.  If the row specified doesn't
+     * exist, the {@link Result} instance returned won't contain any {@link
+     * org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If there are any
+     * failures even after retries, there will be a <code>null</code> in the results array for those
+     * Gets, AND an exception will be thrown. The ordering of the Result array corresponds to the order
+     * of the list of passed in Gets.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.90.0
+     * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client.
+     * Currently {@link #get(List)} doesn't run any validations on the client side; there is
+     * no need at the moment, but this may change in the future. An
+     * {@link IllegalArgumentException} will be thrown in this case.
      */
-    public HTableDescriptor getTableDescriptor() throws IOException;
+    Result[] get(List<Get> gets) throws IOException;
 
     /**
-     * Checks if cell exists
-     * @throws IOException
+     * Returns a scanner on the current table as specified by the {@link Scan}
+     * object.
+     * Note that the passed {@link Scan}'s start row and caching properties
+     * may be changed.
+     *
+     * @param scan A configured {@link Scan} object.
+     * @return A scanner.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public boolean exists(Get get) throws IOException;
+    ResultScanner getScanner(Scan scan) throws IOException;
 
     /**
-     * Transactional version of {@link Table#get(List gets)}
-     * @throws IOException
+     * Gets a scanner on the current table for the given family.
+     *
+     * @param family The column family to scan.
+     * @return A scanner.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public Result[] get(List<Get> gets) throws IOException;
+    ResultScanner getScanner(byte[] family) throws IOException;
 
     /**
-     * Transactional version of {@link Table#getScanner(byte[] family)}
-     * @throws IOException
+     * Gets a scanner on the current table for the given family and qualifier.
+     *
+     * @param family The column family to scan.
+     * @param qualifier The column qualifier to scan.
+     * @return A scanner.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public ResultScanner getScanner(byte[] family) throws IOException;
+    ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
+
 
     /**
-     * Transactional version of {@link Table#getScanner(byte[] family, byte[] qualifier)}
-     * @throws IOException
+     * Puts some data in the table.
+     *
+     * @param put The data to put.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
+    void put(Put put) throws IOException;
 
     /**
-     * Transactional version of {@link Table#put(List puts)}
-     * @throws IOException
+     * Batch puts the specified data into the table.
+     * <p>
+     * This can be used for group commit, or for submitting user-defined batches. Before sending
+     * a batch of mutations to the server, the client runs a few validations on the input list. If an
+     * error is found, for example, a mutation was supplied but was missing it's column an
+     * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there
+     * are any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be
+     * thrown. RetriesExhaustedWithDetailsException contains lists of failed mutations and
+     * corresponding remote exceptions. The ordering of mutations and exceptions in the
+     * encapsulating exception corresponds to the order of the input list of Put requests.
+     *
+     * @param puts The list of mutations to apply.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public void put(List<Put> puts) throws IOException;
+    void put(List<Put> puts) throws IOException;
 
     /**
-     * Transactional version of {@link Table#delete(List deletes)}
-     * @throws IOException
+     * Atomically checks if a row/family/qualifier value matches the expected
+     * value. If it does, it adds the put.  If the passed value is null, the check
+     * is for the lack of column (ie: non-existence)
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param value the expected value
+     * @param put data to put if check succeeds
+     * @throws IOException e
+     * @return true if the new put was executed, false otherwise
      */
-    public void delete(List<Delete> deletes) throws IOException;
+    boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+      byte[] value, Put put) throws IOException;
 
     /**
-     * Delegates to {@link Table#setAutoFlush(boolean autoFlush)}
+     * Atomically checks if a row/family/qualifier value matches the expected
+     * value. If it does, it adds the put.  If the passed value is null, the check
+     * is for the lack of column (ie: non-existence)
+     *
+     * The expected value argument of this call is on the left and the current
+     * value of the cell is on the right side of the comparison operator.
+     *
+     * For example, the GREATER operator means expected value > existing value <=> add the put.
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param compareOp comparison operator to use
+     * @param value the expected value
+     * @param put data to put if check succeeds
+     * @throws IOException e
+     * @return true if the new put was executed, false otherwise
+     * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+     *  {@link #checkAndPut(byte[], byte[], byte[], CompareOperator, byte[], Put)}}
+     */
+    @Deprecated
+    boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+      CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException;
+
+    /**
+     * Atomically checks if a row/family/qualifier value matches the expected
+     * value. If it does, it adds the put.  If the passed value is null, the check
+     * is for the lack of column (ie: non-existence)
+     *
+     * The expected value argument of this call is on the left and the current
+     * value of the cell is on the right side of the comparison operator.
+     *
+     * For example, the GREATER operator means expected value > existing value <=> add the put.
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param op comparison operator to use
+     * @param value the expected value
+     * @param put data to put if check succeeds
+     * @throws IOException e
+     * @return true if the new put was executed, false otherwise
+     */
+    boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+                        CompareOperator op, byte[] value, Put put) throws IOException;
+
+    /**
+     * Deletes the specified cells/row.
+     *
+     * @param delete The object that specifies what to delete.
+     * @throws IOException if a remote or network exception occurs.
+     * @since 0.20.0
      */
-    public void setAutoFlush(boolean autoFlush);
+    void delete(Delete delete) throws IOException;
 
     /**
-     * Delegates to {@link Table#isAutoFlush()}
+     * Batch Deletes the specified cells/rows from the table.
+     * <p>
+     * If a specified row does not exist, the {@link Delete} will report as though a successful
+     * delete; no exception will be thrown. If there are any failures even after retries,
+     * a {@link RetriesExhaustedWithDetailsException} will be thrown.
+     * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and
+     * corresponding remote exceptions.
+     *
+     * @param deletes List of things to delete. The input list gets modified by this
+     * method. All successfully applied {@link Delete}s in the list are removed (in particular it
+     * gets re-ordered, so the order in which the elements are inserted in the list gives no
+     * guarantee as to the order in which the {@link Delete}s are executed).
+     * @throws IOException if a remote or network exception occurs. In that case
+     * the {@code deletes} argument will contain the {@link Delete} instances
+     * that have not be successfully applied.
+     * @since 0.20.1
+     * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
+     * {@link #put(List)} runs pre-flight validations on the input list on the client. Currently
+     * {@link #delete(List)} doesn't run validations on the client; there is no need at the
+     * moment, but this may change in the future. An {@link IllegalArgumentException} will be thrown
+     * in this case.
      */
-    public boolean isAutoFlush();
+    void delete(List<Delete> deletes) throws IOException;
 
     /**
-     * Delegates to see Table.getWriteBufferSize()
+     * Atomically checks if a row/family/qualifier value matches the expected
+     * value. If it does, it adds the delete.  If the passed value is null, the
+     * check is for the lack of column (ie: non-existence)
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param value the expected value
+     * @param delete data to delete if check succeeds
+     * @throws IOException e
+     * @return true if the new delete was executed, false otherwise
      */
-    public long getWriteBufferSize();
+    boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+      byte[] value, Delete delete) throws IOException;
 
     /**
-     * Delegates to see Table.setWriteBufferSize()
+     * Atomically checks if a row/family/qualifier value matches the expected
+     * value. If it does, it adds the delete.  If the passed value is null, the
+     * check is for the lack of column (ie: non-existence)
+     *
+     * The expected value argument of this call is on the left and the current
+     * value of the cell is on the right side of the comparison operator.
+     *
+     * For example, the GREATER operator means expected value > existing value <=> add the delete.
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param compareOp comparison operator to use
+     * @param value the expected value
+     * @param delete data to delete if check succeeds
+     * @throws IOException e
+     * @return true if the new delete was executed, false otherwise
+     * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+     *  {@link #checkAndDelete(byte[], byte[], byte[], CompareOperator, byte[], Delete)}
      */
-    public void setWriteBufferSize(long writeBufferSize) throws IOException;
+    @Deprecated
+    boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+      CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException;
 
     /**
-     * Delegates to see Table.flushCommits()
+     * Atomically checks if a row/family/qualifier value matches the expected
+     * value. If it does, it adds the delete.  If the passed value is null, the
+     * check is for the lack of column (ie: non-existence)
+     *
+     * The expected value argument of this call is on the left and the current
+     * value of the cell is on the right side of the comparison operator.
+     *
+     * For example, the GREATER operator means expected value > existing value <=> add the delete.
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param op comparison operator to use
+     * @param value the expected value
+     * @param delete data to delete if check succeeds
+     * @throws IOException e
+     * @return true if the new delete was executed, false otherwise
      */
-    public void flushCommits() throws IOException;
+    boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+                           CompareOperator op, byte[] value, Delete delete) throws IOException;
 
     /**
-     * Releases resources
+     * Performs multiple mutations atomically on a single row. Currently
+     * {@link Put} and {@link Delete} are supported.
+     *
+     * @param rm object that specifies the set of mutations to perform atomically
      * @throws IOException
      */
-    public void close() throws IOException;
+    void mutateRow(final RowMutations rm) throws IOException;
+
+    /**
+     * Appends values to one or more columns within a single row.
+     * <p>
+     * This operation guarantees atomicity to readers. Appends are done
+     * under a single row lock, so write operations to a row are synchronized, and
+     * readers are guaranteed to see this operation fully completed.
+     *
+     * @param append object that specifies the columns and amounts to be used
+     *                  for the append operation
+     * @throws IOException e
+     * @return values of columns after the append operation (may be null)
+     */
+    Result append(final Append append) throws IOException;
+
+    /**
+     * Increments one or more columns within a single row.
+     * <p>
+     * This operation ensures atomicity to readers. Increments are done
+     * under a single row lock, so write operations to a row are synchronized, and
+     * readers are guaranteed to see this operation fully completed.
+     *
+     * @param increment object that specifies the columns and amounts to be used
+     *                  for the increment operations
+     * @throws IOException e
+     * @return values of columns after the increment
+     */
+    Result increment(final Increment increment) throws IOException;
+
+    /**
+     * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
+     * <p>
+     * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
+     * @param row The row that contains the cell to increment.
+     * @param family The column family of the cell to increment.
+     * @param qualifier The column qualifier of the cell to increment.
+     * @param amount The amount to increment the cell with (or decrement, if the
+     * amount is negative).
+     * @return The new value, post increment.
+     * @throws IOException if a remote or network exception occurs.
+     */
+    long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+      long amount) throws IOException;
+
+    /**
+     * Atomically increments a column value. If the column value already exists
+     * and is not a big-endian long, this could throw an exception. If the column
+     * value does not yet exist it is initialized to <code>amount</code> and
+     * written to the specified column.
+     *
+     * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
+     * scenario you will lose any increments that have not been flushed.
+     * @param row The row that contains the cell to increment.
+     * @param family The column family of the cell to increment.
+     * @param qualifier The column qualifier of the cell to increment.
+     * @param amount The amount to increment the cell with (or decrement, if the
+     * amount is negative).
+     * @param durability The persistence guarantee for this increment.
+     * @return The new value, post increment.
+     * @throws IOException if a remote or network exception occurs.
+     */
+    long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+      long amount, Durability durability) throws IOException;
+
+    /**
+     * Releases any resources held or pending changes in internal buffers.
+     *
+     * @throws IOException if a remote or network exception occurs.
+     */
+    @Override
+    void close() throws IOException;
+
+    /**
+     * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
+     * table region containing the specified row.  The row given does not actually have
+     * to exist.  Whichever region would contain the row based on start and end keys will
+     * be used.  Note that the {@code row} parameter is also not passed to the
+     * coprocessor handler registered for this protocol, unless the {@code row}
+     * is separately passed as an argument in the service request.  The parameter
+     * here is only used to locate the region used to handle the call.
+     *
+     * <p>
+     * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
+     * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
+     * </p>
+     *
+     * <div style="background-color: #cccccc; padding: 2px">
+     * <blockquote><pre>
+     * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
+     * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
+     * MyCallRequest request = MyCallRequest.newBuilder()
+     *     ...
+     *     .build();
+     * MyCallResponse response = service.myCall(null, request);
+     * </pre></blockquote></div>
+     *
+     * @param row The row key used to identify the remote region location
+     * @return A CoprocessorRpcChannel instance
+     */
+    CoprocessorRpcChannel coprocessorService(byte[] row);
+
+    /**
+     * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+     * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
+     * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+     * with each {@link com.google.protobuf.Service} instance.
+     *
+     * @param service the protocol buffer {@code Service} implementation to call
+     * @param startKey start region selection with region containing this row.  If {@code null}, the
+     * selection will start with the first table region.
+     * @param endKey select regions up to and including the region containing this row. If {@code
+     * null}, selection will continue through the last table region.
+     * @param callable this instance's
+     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
+     * method will be invoked once per table region, using the {@link com.google.protobuf.Service}
+     * instance connected to that region.
+     * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
+     * @param <R> Return type for the {@code callable} parameter's {@link
+     * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+     * @return a map of result values keyed by region name
+     */
+    <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
+      byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
+      throws ServiceException, Throwable;
+
+    /**
+     * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+     * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
+     * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+     * with each {@link Service} instance.
+     *
+     * <p> The given {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],
+     * byte[], Object)} method will be called with the return value from each region's {@link
+     * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. </p>
+     *
+     * @param service the protocol buffer {@code Service} implementation to call
+     * @param startKey start region selection with region containing this row.  If {@code null}, the
+     * selection will start with the first table region.
+     * @param endKey select regions up to and including the region containing this row. If {@code
+     * null}, selection will continue through the last table region.
+     * @param callable this instance's
+     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
+     * method will be invoked once per table region, using the {@link Service} instance connected to
+     * that region.
+     * @param callback
+     * @param <T> the {@link Service} subclass to connect to
+     * @param <R> Return type for the {@code callable} parameter's {@link
+     * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+     */
+    <T extends Service, R> void coprocessorService(final Class<T> service,
+      byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
+      final Batch.Callback<R> callback) throws ServiceException, Throwable;
+
+    /**
+     * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+     * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive); all
+     * the invocations to the same region server will be batched into one call. The coprocessor
+     * service is invoked according to the service instance, method name and parameters.
+     *
+     * @param methodDescriptor
+     *          the descriptor for the protobuf service method to call.
+     * @param request
+     *          the method call parameters
+     * @param startKey
+     *          start region selection with region containing this row. If {@code null}, the
+     *          selection will start with the first table region.
+     * @param endKey
+     *          select regions up to and including the region containing this row. If {@code null},
+     *          selection will continue through the last table region.
+     * @param responsePrototype
+     *          the proto type of the response of the method in Service.
+     * @param <R>
+     *          the response type for the coprocessor Service method
+     * @throws ServiceException
+     * @throws Throwable
+     * @return a map of result values keyed by region name
+     */
+    <R extends Message> Map<byte[], R> batchCoprocessorService(
+      Descriptors.MethodDescriptor methodDescriptor, Message request,
+      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;
+
+    /**
+     * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+     * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive); all
+     * the invocations to the same region server will be batched into one call. The coprocessor
+     * service is invoked according to the service instance, method name and parameters.
+     *
+     * <p>
+     * The given
+     * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
+     * method will be called with the return value from each region's invocation.
+     * </p>
+     *
+     * @param methodDescriptor
+     *          the descriptor for the protobuf service method to call.
+     * @param request
+     *          the method call parameters
+     * @param startKey
+     *          start region selection with region containing this row. If {@code null}, the
+     *          selection will start with the first table region.
+     * @param endKey
+     *          select regions up to and including the region containing this row. If {@code null},
+     *          selection will continue through the last table region.
+     * @param responsePrototype
+     *          the proto type of the response of the method in Service.
+     * @param callback
+     *          callback to invoke with the response for each region
+     * @param <R>
+     *          the response type for the coprocessor Service method
+     * @throws ServiceException
+     * @throws Throwable
+     */
+    <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
+      Message request, byte[] startKey, byte[] endKey, R responsePrototype,
+      Batch.Callback<R> callback) throws ServiceException, Throwable;
+
+    /**
+     * Atomically checks if a row/family/qualifier value matches the expected value.
+     * If it does, it performs the row mutations.  If the passed value is null, the check
+     * is for the lack of column (ie: non-existence)
+     *
+     * The expected value argument of this call is on the left and the current
+     * value of the cell is on the right side of the comparison operator.
+     *
+     * For example, the GREATER operator means expected value > existing value <=> perform row mutations.
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param compareOp the comparison operator
+     * @param value the expected value
+     * @param mutation  mutations to perform if check succeeds
+     * @throws IOException e
+     * @return true if the new put was executed, false otherwise
+     * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+     * {@link #checkAndMutate(byte[], byte[], byte[], CompareOperator, byte[], RowMutations)}
+     */
+    @Deprecated
+    boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
+        CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException;
+
+    /**
+     * Atomically checks if a row/family/qualifier value matches the expected value.
+     * If it does, it performs the row mutations.  If the passed value is null, the check
+     * is for the lack of column (ie: non-existence)
+     *
+     * The expected value argument of this call is on the left and the current
+     * value of the cell is on the right side of the comparison operator.
+     *
+     * For example, the GREATER operator means expected value > existing value <=> perform row mutations.
+     *
+     * @param row to check
+     * @param family column family to check
+     * @param qualifier column qualifier to check
+     * @param op the comparison operator
+     * @param value the expected value
+     * @param mutation  mutations to perform if check succeeds
+     * @throws IOException e
+     * @return true if the new put was executed, false otherwise
+     */
+    boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+                           byte[] value, RowMutations mutation) throws IOException;
+
+    /**
+     * Get timeout of each rpc request in this Table instance. It will be overridden by a more
+     * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
+     * @see #getReadRpcTimeout(TimeUnit)
+     * @see #getWriteRpcTimeout(TimeUnit)
+     * @param unit the unit of time the timeout to be represented in
+     * @return rpc timeout in the specified time unit
+     */
+    long getRpcTimeout(TimeUnit unit);
+
+    /**
+     * Get the timeout (in milliseconds) of each rpc request in this Table instance.
+     *
+     * @return the currently configured rpc timeout in milliseconds
+     * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or
+     *             {@link #getWriteRpcTimeout(TimeUnit)} instead
+     */
+    @Deprecated
+    int getRpcTimeout();
+
+    /**
+     * Set the timeout (in milliseconds) of each rpc request for operations on this Table
+     * instance; it overrides the value of hbase.rpc.timeout in the configuration.
+     * If an rpc request waits too long, it stops waiting and sends a new request to retry,
+     * until the retries are exhausted or the operation timeout is reached.
+     * <p>
+     * NOTE: This will set both the read and write timeout settings to the provided value.
+     *
+     * @param rpcTimeout the timeout of each rpc request in milliseconds.
+     *
+     * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
+     */
+    @Deprecated
+    void setRpcTimeout(int rpcTimeout);
+
+    /**
+     * Get the timeout of each rpc read request in this Table instance.
+     * @param unit the unit of time in which to express the timeout
+     * @return read rpc timeout in the specified time unit
+     */
+    long getReadRpcTimeout(TimeUnit unit);
+
+    /**
+     * Get the timeout (in milliseconds) of each rpc read request in this Table instance.
+     * @deprecated since 2.0 and will be removed in version 3.0;
+     *             use {@link #getReadRpcTimeout(TimeUnit)} instead
+     */
+    @Deprecated
+    int getReadRpcTimeout();
+
+    /**
+     * Set the timeout (in milliseconds) of each rpc read request for operations on this Table
+     * instance; it overrides the value of hbase.rpc.read.timeout in the configuration.
+     * If an rpc read request waits too long, it stops waiting and sends a new request to retry,
+     * until the retries are exhausted or the operation timeout is reached.
+     *
+     * @param readRpcTimeout the timeout for read rpc request in milliseconds
+     * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
+     */
+    @Deprecated
+    void setReadRpcTimeout(int readRpcTimeout);
+
+    /**
+     * Get the timeout of each rpc write request in this Table instance.
+     * @param unit the unit of time in which to express the timeout
+     * @return write rpc timeout in the specified time unit
+     */
+    long getWriteRpcTimeout(TimeUnit unit);
+
+    /**
+     * Get the timeout (in milliseconds) of each rpc write request in this Table instance.
+     * @deprecated since 2.0 and will be removed in version 3.0;
+     *             use {@link #getWriteRpcTimeout(TimeUnit)} instead
+     */
+    @Deprecated
+    int getWriteRpcTimeout();
+
+    /**
+     * Set the timeout (in milliseconds) of each rpc write request for operations on this Table
+     * instance; it overrides the value of hbase.rpc.write.timeout in the configuration.
+     * If an rpc write request waits too long, it stops waiting and sends a new request to retry,
+     * until the retries are exhausted or the operation timeout is reached.
+     *
+     * @param writeRpcTimeout the timeout for write rpc request in milliseconds
+     * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
+     */
+    @Deprecated
+    void setWriteRpcTimeout(int writeRpcTimeout);
+
+    /**
+     * Get the timeout of each operation in this Table instance.
+     * @param unit the unit of time in which to express the timeout
+     * @return operation timeout in the specified time unit
+     */
+    long getOperationTimeout(TimeUnit unit);
+
+    /**
+     * Get the timeout (in milliseconds) of each operation in this Table instance.
+     * @deprecated since 2.0 and will be removed in version 3.0;
+     *             use {@link #getOperationTimeout(TimeUnit)} instead
+     */
+    @Deprecated
+    int getOperationTimeout();
+
+    /**
+     * Set the timeout (in milliseconds) of each operation in this Table instance; it overrides
+     * the value of hbase.client.operation.timeout in the configuration.
+     * The operation timeout is a top-level restriction that ensures a blocking method is not
+     * blocked longer than this. Within each operation, if an rpc request fails because of a
+     * timeout or another reason, the client retries until it succeeds or throws a
+     * RetriesExhaustedException. But if the total blocking time reaches the operation timeout
+     * before the retries are exhausted, the call breaks out early and throws a
+     * SocketTimeoutException.
+     * @param operationTimeout the total timeout of each operation in millisecond.
+     * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
+     */
+    @Deprecated
+    void setOperationTimeout(int operationTimeout);
 }
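
For readers of the interface above, a minimal usage sketch of the
non-deprecated checkAndMutate(..., CompareOperator, ...) overload may help.
The row, family, and qualifier names here are hypothetical, and "table" stands
for any PhoenixTransactionalTable implementation:

    import java.io.IOException;
    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.util.Bytes;

    void demoCheckAndMutate(PhoenixTransactionalTable table) throws IOException {
        byte[] row = Bytes.toBytes("r1");
        byte[] cf  = Bytes.toBytes("cf");
        byte[] col = Bytes.toBytes("balance");

        // Mutations to apply atomically if the check passes.
        RowMutations mutations = new RowMutations(row);
        mutations.add(new Put(row).addColumn(cf, col, Bytes.toBytes(0L)));

        // The expected value is the LEFT operand of the comparison: with
        // GREATER, the mutations run only if 100L > the current cell value.
        boolean applied = table.checkAndMutate(row, cf, col,
                CompareOperator.GREATER, Bytes.toBytes(100L), mutations);
    }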

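The timeout hierarchy documented above (per-rpc timeouts capped by the
operation timeout) can be configured without the deprecated setters. A sketch,
assuming a hypothetical table MY_TABLE and using the HBase 2.0 TableBuilder
that the @deprecated notes point to:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    Table configureTimeouts(Connection conn) {
        // A null pool falls back to the connection's default ExecutorService.
        Table table = conn.getTableBuilder(TableName.valueOf("MY_TABLE"), null)
                .setOperationTimeout(60000)  // overall cap per blocking call (ms)
                .setReadRpcTimeout(10000)    // per read rpc attempt (ms)
                .setWriteRpcTimeout(10000)   // per write rpc attempt (ms)
                .build();
        // Read the values back through the TimeUnit-based getters.
        long readMs = table.getReadRpcTimeout(TimeUnit.MILLISECONDS);
        long opMs   = table.getOperationTimeout(TimeUnit.MILLISECONDS);
        assert readMs <= opMs; // rpc attempts retry until the operation timeout cuts in
        return table;
    }
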
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index e248f33..f9de869 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
 
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
@@ -69,7 +68,8 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
 
         tephraTransactionContext = (TephraTransactionContext) ctx;
 
-        transactionAwareHTable = new TransactionAwareHTable(hTable, (pTable != null && pTable.isImmutableRows()) ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
+        transactionAwareHTable = new TransactionAwareHTable(hTable, (pTable != null && pTable.isImmutableRows())
+                ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
 
         tephraTransactionContext.addTransactionAware(transactionAwareHTable);
 
@@ -98,10 +98,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
         return transactionAwareHTable.getScanner(scan);
     }
 
-    @Override
-    public byte[] getTableName() {
-        return transactionAwareHTable.getTableName();
-    }
 
     @Override
     public Configuration getConfiguration() {
@@ -145,31 +141,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
     }
 
     @Override
-    public void setAutoFlush(boolean autoFlush) {
-        transactionAwareHTable.setAutoFlush(autoFlush);
-    }
-
-    @Override
-    public boolean isAutoFlush() {
-        return transactionAwareHTable.isAutoFlush();
-    }
-
-    @Override
-    public long getWriteBufferSize() {
-        return transactionAwareHTable.getWriteBufferSize();
-    }
-
-    @Override
-    public void setWriteBufferSize(long writeBufferSize) throws IOException {
-        transactionAwareHTable.setWriteBufferSize(writeBufferSize);
-    }
-
-    @Override
-    public void flushCommits() throws IOException {
-        transactionAwareHTable.flushCommits();
-    }
-
-    @Override
     public void close() throws IOException {
         transactionAwareHTable.close();
     }
@@ -333,7 +304,7 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
     @Override
     public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
             byte[] value, RowMutations mutation) throws IOException {
-        return transactionAwareHTable.checkAndMutate(row, family, qualifier, op, value, mutations);
+        return transactionAwareHTable.checkAndMutate(row, family, qualifier, op, value, mutation);
     }
 
     @Override
@@ -353,7 +324,7 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
 
     @Override
     public void setWriteRpcTimeout(int writeRpcTimeout) {
-        return transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
+        transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
     }
 
     @Override
@@ -380,4 +351,5 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
     public long getOperationTimeout(TimeUnit unit) {
         return transactionAwareHTable.getOperationTimeout(unit);
     }
-}
+
+}
\ No newline at end of file
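

The constructor change above keeps the conflict-detection level keyed off row
immutability. A standalone sketch of the same wiring (the wrap() helper is
hypothetical; TransactionAwareHTable is the copy ported into the Phoenix
workspace by this commit):

    import org.apache.hadoop.hbase.client.Table;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.transaction.TransactionAwareHTable;
    import org.apache.tephra.TxConstants;

    TransactionAwareHTable wrap(Table hTable, PTable pTable) {
        // Immutable-rows tables cannot produce write-write conflicts, so
        // row-level conflict detection is skipped for them.
        TxConstants.ConflictDetection level =
                (pTable != null && pTable.isImmutableRows())
                        ? TxConstants.ConflictDetection.NONE
                        : TxConstants.ConflictDetection.ROW;
        return new TransactionAwareHTable(hTable, level);
    }

As in the constructor, the wrapped table must still be registered with the
transaction context via addTransactionAware() before use.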