Posted to commits@phoenix.apache.org by an...@apache.org on 2017/11/27 13:03:16 UTC

[8/8] phoenix git commit: PHOENIX-4404 Fix all unit test cases (not IT)

PHOENIX-4404 Fix all unit test cases (not IT)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/37200681
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/37200681
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/37200681

Branch: refs/heads/5.x-HBase-2.0
Commit: 372006816245e683746787c1d1a7d6719c232ed3
Parents: 5b9a07d
Author: Ankit Singhal <an...@gmail.com>
Authored: Mon Nov 27 18:29:01 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Mon Nov 27 18:29:01 2017 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/AggregateQueryIT.java       |  2 -
 .../end2end/ExplainPlanWithStatsEnabledIT.java  |  7 +--
 .../phoenix/end2end/FlappingLocalIndexIT.java   |  3 +-
 .../end2end/TableSnapshotReadsMapReduceIT.java  |  4 +-
 .../phoenix/end2end/index/ImmutableIndexIT.java | 11 ++--
 .../end2end/index/MutableIndexFailureIT.java    |  4 +-
 .../end2end/index/PartialIndexRebuilderIT.java  |  4 +-
 .../end2end/index/txn/TxWriteFailureIT.java     |  6 +--
 .../apache/phoenix/execute/PartialCommitIT.java |  3 +-
 .../SystemCatalogWALEntryFilterIT.java          | 20 ++++---
 .../TestPhoenixIndexRpcSchedulerFactory.java    | 23 ++++++--
 .../hbase/index/covered/data/IndexMemStore.java |  7 ++-
 .../TrackingParallelWriterIndexCommitter.java   |  5 +-
 .../phoenix/util/PhoenixKeyValueUtil.java       |  5 +-
 .../phoenix/execute/UnnestArrayPlanTest.java    |  3 +-
 .../CoveredColumnIndexSpecifierBuilder.java     |  7 ++-
 .../index/covered/data/TestIndexMemStore.java   |  8 ++-
 .../hbase/index/write/TestIndexWriter.java      | 56 ++++++++++----------
 .../index/write/TestParalleIndexWriter.java     | 11 ++--
 .../write/TestParalleWriterIndexCommitter.java  | 31 +++++------
 .../index/write/TestWALRecoveryCaching.java     |  6 ++-
 .../query/ConnectionQueryServicesImplTest.java  |  2 +-
 .../schema/stats/StatisticsScannerTest.java     | 37 ++++++++-----
 23 files changed, 150 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
index cb892c6..bcfb922 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.util.ByteUtil;
@@ -105,7 +104,6 @@ public class AggregateQueryIT extends BaseQueryIT {
             
             TableName tn =TableName.valueOf(tableName);
             admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
-            Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableNameBytes);
             Configuration configuration = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
             org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(configuration);
             ((ClusterConnection)hbaseConn).clearRegionCache(TableName.valueOf(tableName));
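
For reference, a standalone sketch of the HBase 2.0 client pattern this context relies on: connections come from ConnectionFactory, and the region cache is cleared through the ClusterConnection cast (the table name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClearRegionCacheExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // ConnectionFactory is the 2.0 entry point for client connections
            try (Connection hbaseConn = ConnectionFactory.createConnection(conf)) {
                // the region-cache maintenance hook still lives on ClusterConnection
                ((ClusterConnection) hbaseConn).clearRegionCache(TableName.valueOf("T"));
            }
        }
    }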

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index bfc6819..74f4f34 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -30,7 +30,8 @@ import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
@@ -867,10 +868,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
         createTestTable(getUrl(), ddl, null, null);
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             // split such that some data for view2 resides on region of view1
-            try (HBaseAdmin admin =
+            try (Admin admin =
                     conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
                 byte[] splitKey = Bytes.toBytes("00Dabcdetenant200B");
-                admin.split(Bytes.toBytes(multiTenantTable), splitKey);
+                admin.split(TableName.valueOf(multiTenantTable), splitKey);
             }
 
             /**
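
HBaseAdmin is gone from the public API in HBase 2.0; Admin.split takes a TableName instead of raw table-name bytes, as the hunk above shows. A minimal sketch of the new call (table and split key are placeholders):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                    Admin admin = conn.getAdmin()) {
                // 2.0: split(TableName, byte[]) replaces HBaseAdmin.split(byte[], byte[])
                admin.split(TableName.valueOf("T"), Bytes.toBytes("splitpoint"));
            }
        }
    }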

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
index a5aa27e..6ebf344 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
@@ -29,13 +29,14 @@ import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.util.Pair;
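
The import swap above reflects HBase 2.0 replacing the mutable HTableDescriptor with an immutable TableDescriptor assembled through a builder. A minimal sketch of the builder pattern (table name is illustrative; column families are attached through ColumnFamilyDescriptorBuilder in the same style):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DescriptorExample {
        public static void main(String[] args) {
            // descriptors are immutable once built; all mutation happens on the builder
            TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("T"))
                    .build();
            System.out.println(desc.getTableName());
        }
    }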

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index 612bf3b..fcf89a0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
@@ -200,7 +200,7 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
     // call flush to create new files in the region
     admin.flush(TableName.valueOf(tableName));
 
-    List<HBaseProtos.SnapshotDescription> snapshots = admin.listSnapshots();
+    List<SnapshotDescription> snapshots = admin.listSnapshots();
     Assert.assertEquals(tableName, snapshots.get(0).getTable());
 
     // upsert data after snapshot
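
Admin.listSnapshots() now returns the client-side SnapshotDescription POJO rather than the protobuf-generated HBaseProtos.SnapshotDescription. A sketch of listing snapshots against the 2.0 API:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class SnapshotListExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                    Admin admin = conn.getAdmin()) {
                // the POJO exposes the same accessors the test above relies on
                List<SnapshotDescription> snapshots = admin.listSnapshots();
                for (SnapshotDescription snapshot : snapshots) {
                    System.out.println(snapshot.getName() + " -> " + snapshot.getTable());
                }
            }
        }
    }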

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index 9b06955..8894b84 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -40,13 +40,10 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
@@ -298,11 +295,9 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
     // used to create an index while a batch of rows are being written
     public static class CreateIndexRegionObserver extends SimpleRegionObserver {
         @Override
-        public void postPut(ObserverContext<RegionCoprocessorEnvironment> c,
-                Put put, WALEdit edit, final Durability durability)
-                        throws HBaseIOException {
-            String tableName = c.getEnvironment().getRegion().getRegionInfo()
-                    .getTable().getNameAsString();
+        public void postPut(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+                Put put, org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException {
+            String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
             if (tableName.equalsIgnoreCase(TABLE_NAME)
                     // create the index after the second batch  
                     && Bytes.startsWith(put.getRow(), Bytes.toBytes("varchar200_upsert_select"))) {
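
The signature change above tracks two HBase 2.0 moves: WALEdit now lives in org.apache.hadoop.hbase.wal, and the coprocessor hooks declare plain IOException instead of HBaseIOException. A self-contained observer under the new signature, mirroring the test's use of the SimpleRegionObserver test helper (the log line is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
    import org.apache.hadoop.hbase.wal.WALEdit;

    public class LoggingRegionObserver extends SimpleRegionObserver {
        @Override
        public void postPut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
                WALEdit edit, Durability durability) throws IOException {
            // same region/table lookup chain as the test above
            String table = c.getEnvironment().getRegion().getRegionInfo()
                    .getTable().getNameAsString();
            System.out.println("postPut on " + table);
        }
    }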

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 1f4526d..8c7fccf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -148,10 +148,10 @@ public class MutableIndexFailureIT extends BaseTest {
         NUM_SLAVES_BASE = 4;
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
         indexRebuildTaskRegionEnvironment =
-                (RegionCoprocessorEnvironment) getUtility()
+                 getUtility()
                         .getRSForFirstRegionInTable(
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
-                        .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+                        .getRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
                         .get(0).getCoprocessorHost()
                         .findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
         MetaDataRegionObserver.initRebuildIndexConnectionProps(
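
HRegionServer.getOnlineRegions(TableName) became getRegions(TableName) in HBase 2.0 and returns concrete HRegion instances, which is also why the RegionCoprocessorEnvironment cast above is no longer needed. A small helper sketch of the renamed lookup:

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;

    public class RegionLookupSketch {
        // returns the first online region of the table, or null if none is open here
        static HRegion firstRegion(HRegionServer server, TableName table) {
            List<HRegion> regions = server.getRegions(table);
            return regions.isEmpty() ? null : regions.get(0);
        }
    }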

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index c2ae7f2..21d1ff0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -96,10 +96,10 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(WAIT_AFTER_DISABLED));
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), ReadOnlyProps.EMPTY_PROPS);
         indexRebuildTaskRegionEnvironment =
-                (RegionCoprocessorEnvironment) getUtility()
+               getUtility()
                         .getRSForFirstRegionInTable(
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
-                        .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+                        .getRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
                         .get(0).getCoprocessorHost()
                         .findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
         MetaDataRegionObserver.initRebuildIndexConnectionProps(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
index ec60151..5fae9fd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
@@ -33,13 +33,11 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.apache.phoenix.hbase.index.Indexer;
@@ -182,8 +180,8 @@ public class TxWriteFailureIT extends BaseUniqueNamesOwnClusterIT {
 	
 	public static class FailingRegionObserver extends SimpleRegionObserver {
         @Override
-        public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
-                final Durability durability) throws HBaseIOException {
+        public void prePut(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c, Put put,
+                org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException {
             if (shouldFailUpsert(c, put)) {
                 // throwing anything other than instances of IOException result
                 // in this coprocessor being unloaded

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index 10fd7f8..2ceac55 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -33,7 +33,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
@@ -49,8 +48,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.phoenix.end2end.BaseOwnClusterIT;
 import org.apache.phoenix.hbase.index.Indexer;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
index 776e300..bb10ae0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
@@ -17,14 +17,23 @@
  */
 package org.apache.phoenix.replication;
 
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -39,11 +48,6 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Properties;
-import java.util.UUID;
-
 
 public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
 
@@ -112,7 +116,7 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
   public void testOtherTablesAutoPass() throws Exception {
     //Cell is nonsense but we should auto pass because the table name's not System.Catalog
     WAL.Entry entry = new WAL.Entry(new WALKey(REGION,
-        TableName.valueOf(TestUtil.ENTITY_HISTORY_TABLE_NAME)), new WALEdit());
+        TableName.valueOf(TestUtil.ENTITY_HISTORY_TABLE_NAME), System.currentTimeMillis()), new WALEdit());
     entry.getEdit().add(CellUtil.createCell(Bytes.toBytes("foo")));
     SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
     Assert.assertEquals(1, filter.filter(entry).getEdit().size());
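
The WALKey change above exists because the two-argument constructor is gone; against the HBase 2.0 snapshot this branch builds with, the write time is passed explicitly. A standalone sketch (region name and table are placeholders):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.wal.WALEdit;
    import org.apache.hadoop.hbase.wal.WALKey;

    public class WalEntryExample {
        public static void main(String[] args) {
            // the write time is now a required constructor argument
            WALKey key = new WALKey(Bytes.toBytes("encoded-region"),
                    TableName.valueOf("T"), System.currentTimeMillis());
            WALEdit edit = new WALEdit();
            System.out.println(key + " with " + edit.size() + " cells");
        }
    }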

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
index fb29985..cc3c1d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.rpc;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcScheduler;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
@@ -28,11 +29,13 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.mockito.Mockito;
 
 public class TestPhoenixIndexRpcSchedulerFactory extends PhoenixRpcSchedulerFactory {
-    
+    private static Abortable abortable = new AbortServer();
+    private static final Configuration conf = HBaseConfiguration.create();
+    private static PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class);
     private static RpcExecutor indexRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-index-queue", 30, 1,
-            300));
+            qosFunction,conf,abortable));
     private static RpcExecutor metadataRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-metataqueue", 30,
-            1, 300));
+            1, qosFunction,conf,abortable));
 
     @Override
     public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
@@ -47,6 +50,20 @@ public class TestPhoenixIndexRpcSchedulerFactory extends PhoenixRpcSchedulerFact
         return create(configuration, priorityFunction, null);
     }
     
+    private static class AbortServer implements Abortable {
+        private boolean aborted = false;
+
+        @Override
+        public void abort(String why, Throwable e) {
+            aborted = true;
+        }
+
+        @Override
+        public boolean isAborted() {
+            return aborted;
+        }
+    }
+    
     public static RpcExecutor getIndexRpcExecutor() {
         return indexRpcExecutor;
     }
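
The constructor change above follows HBase 2.0's BalancedQueueRpcExecutor, which takes a PriorityFunction, Configuration, and Abortable in place of the old maxQueueLength integer. A minimal construction sketch, using Mockito mocks as stand-ins the way the test does:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
    import org.apache.hadoop.hbase.ipc.PriorityFunction;
    import org.mockito.Mockito;

    public class RpcExecutorExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            PriorityFunction qos = Mockito.mock(PriorityFunction.class);
            Abortable abortable = Mockito.mock(Abortable.class);
            // 30 handlers over 1 queue; queue limits now come from the Configuration
            BalancedQueueRpcExecutor executor =
                    new BalancedQueueRpcExecutor("example-queue", 30, 1, qos, conf, abortable);
            System.out.println("constructed: " + executor);
        }
    }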

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index dfd3774..8247496 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -79,7 +79,12 @@ public class IndexMemStore implements KeyValueStore {
   private CellComparator comparator;
 
   public IndexMemStore() {
-    this(CellComparatorImpl.COMPARATOR);
+    this(new CellComparatorImpl(){
+        @Override
+        public int compare(Cell a, Cell b) {
+            return super.compare(a, b, true);
+        }
+    });
   }
 
   /**
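
CellComparatorImpl.COMPARATOR uses the memstore sequence id as a final tie-breaker, which the index memstore does not want; the anonymous subclass above routes the two-argument compare through the three-argument overload with ignoreSequenceid = true. The same comparator as a named class, for readability:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellComparatorImpl;

    /** Orders cells by key only, ignoring their memstore sequence ids. */
    public class SequenceIdAgnosticComparator extends CellComparatorImpl {
        @Override
        public int compare(Cell a, Cell b) {
            // the boolean argument is ignoreSequenceid
            return super.compare(a, b, true);
        }
    }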

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index cf8279a..5fb8b1c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -189,8 +189,9 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                 }
 
                 private void throwFailureIfDone() throws SingleIndexWriteFailureException {
-                    if (stopped.isStopped() || env.getConnection() == null || env.getConnection().isClosed()
-                            || env.getConnection().isAborted()
+                    if (stopped.isStopped()
+                            || (env != null && (env.getConnection() == null || env.getConnection().isClosed()
+                            || env.getConnection().isAborted()))
                             || Thread.currentThread().isInterrupted()) { throw new SingleIndexWriteFailureException(
                                     "Pool closed, not attempting to write to the index!", null); }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
index e49865e..ed4104b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilder.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
@@ -76,11 +77,11 @@ public class PhoenixKeyValueUtil {
     
     public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, 
         int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value, 
-        int valueOffset, int valueLength) {
+        int valueOffset, int valueLength,CellBuilder.DataType type) {
         return CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
                 .setRow(key, keyOffset, keyLength).setFamily(cf, cfOffset, cfLength)
                 .setQualifier(cq, cqOffset, cqLength).setTimestamp(ts)
-                .setValue(value, valueOffset, valueLength).build();
+                .setValue(value, valueOffset, valueLength).setType(type).build();
     }
     
 	public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) {
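
The extra DataType parameter exists because the CellBuilder in this HBase 2.0 snapshot requires the cell's type to be set explicitly before build(). A standalone sketch of the builder chain (row/family/qualifier values are illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilder;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellBuilderSketch {
        public static void main(String[] args) {
            Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
                    .setRow(Bytes.toBytes("row"))
                    .setFamily(Bytes.toBytes("cf"))
                    .setQualifier(Bytes.toBytes("cq"))
                    .setTimestamp(System.currentTimeMillis())
                    .setValue(Bytes.toBytes("value"))
                    .setType(CellBuilder.DataType.Put) // type is now mandatory
                    .build();
            System.out.println(Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }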

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
index 5383d9b..f2d9c03 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
@@ -31,6 +31,7 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -159,7 +160,7 @@ public class UnnestArrayPlanTest {
         for (Object[] array : arrays) {
             PhoenixArray pArray = new PhoenixArray(baseType, array);
             byte[] bytes = arrayType.toBytes(pArray);            
-            tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
+            tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0,CellBuilder.DataType.Put)));
         }
         
         return tuples;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
index 6494db2..6a54b93 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
@@ -28,10 +28,11 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.phoenix.hbase.index.Indexer;
 
 /**
- * Helper to build the configuration for the {@link NonTxIndexBuilder}.
+ * Helper to build the configuration for the {@link NonTxIndexBuilder}.
  * <p>
  * This class is NOT thread-safe; all concurrent access must be managed externally.
  */
@@ -135,7 +136,9 @@ public class CoveredColumnIndexSpecifierBuilder {
     // add the codec for the index to the map of options
     Map<String, String> opts = this.convertToMap();
     opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName());
-   return Indexer.enableIndexing(desc, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
+        TableDescriptorBuilder newBuilder = TableDescriptorBuilder.newBuilder(desc);
+        Indexer.enableIndexing(newBuilder, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
+        return newBuilder.build();
   }
 
   public static List<ColumnGroup> getColumns(Configuration conf) {
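
Because TableDescriptor is immutable in HBase 2.0, Indexer.enableIndexing now mutates a TableDescriptorBuilder and the caller rebuilds the descriptor, as the hunk shows. A condensed helper sketch of that round-trip (the throws clause is an assumption based on the original signature):

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hbase.Coprocessor;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.phoenix.hbase.index.Indexer;
    import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder;

    public class EnableIndexingSketch {
        static TableDescriptor withIndexing(TableDescriptor desc, Map<String, String> opts)
                throws IOException {
            // copy the immutable descriptor into a builder, mutate, then rebuild
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(desc);
            Indexer.enableIndexing(builder, NonTxIndexBuilder.class, opts,
                    Coprocessor.PRIORITY_USER);
            return builder.build();
        }
    }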

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
index bcd5666..0f5f995 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.hbase.index.covered.data;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
@@ -38,7 +39,12 @@ public class TestIndexMemStore {
 
   @Test
   public void testCorrectOverwritting() throws Exception {
-    IndexMemStore store = new IndexMemStore(CellComparatorImpl.COMPARATOR);
+    IndexMemStore store = new IndexMemStore(new CellComparatorImpl(){
+        @Override
+        public int compare(Cell a, Cell b) {
+            return super.compare(a, b, true);
+        }
+    });
     long ts = 10;
     KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val);
     kv.setSequenceId(2);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 57e3ba1..af45dad 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -111,24 +112,23 @@ public class TestIndexWriter {
 
     Table table = Mockito.mock(Table.class);
     final boolean[] completed = new boolean[] { false };
-    Mockito.when(table.batch(Mockito.anyList(), Mockito.anyList())).thenAnswer(new Answer<Void>() {
-
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        // just keep track that it was called
-        completed[0] = true;
-        return null;
-      }
-    });
+        Mockito.doAnswer(new Answer<Void>() {
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                // just keep track that it was called
+                completed[0] = true;
+                return null;
+            }
+        }).when(table).batch(Mockito.anyList(), Mockito.any());
     Mockito.when(table.getName()).thenReturn(TableName.valueOf(testName.getTableName()));
     // add the table to the set of tables, so its returned to the writer
     tables.put(new ImmutableBytesPtr(tableName), table);
 
     // setup the writer and failure policy
     TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
-    committer.setup(factory, exec, abort, stop, e);
+    committer.setup(factory, exec, stop, e);
     KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
-    policy.setup(stop, abort);
+    policy.setup(stop, e);
     IndexWriter writer = new IndexWriter(committer, policy);
     writer.write(indexUpdates);
     assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
@@ -163,21 +163,21 @@ public class TestIndexWriter {
     final CountDownLatch writeStartedLatch = new CountDownLatch(1);
     // latch never gets counted down, so we wait forever
     final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
-    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        LOG.info("Write started");
-        writeStartedLatch.countDown();
-        // when we interrupt the thread for shutdown, we should see this throw an interrupt too
-        try {
-        waitOnAbortedLatch.await();
-        } catch (InterruptedException e) {
-          LOG.info("Correctly interrupted while writing!");
-          throw e;
-        }
-        return null;
-      }
-    });
+        Mockito.doAnswer(new Answer<Void>() {
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                LOG.info("Write started");
+                writeStartedLatch.countDown();
+                // when we interrupt the thread for shutdown, we should see this throw an interrupt too
+                try {
+                    waitOnAbortedLatch.await();
+                } catch (InterruptedException e) {
+                    LOG.info("Correctly interrupted while writing!");
+                    throw e;
+                }
+                return null;
+            }
+        }).when(table).batch(Mockito.anyListOf(Row.class), Mockito.any());
     // add the tables to the set of tables, so its returned to the writer
     tables.put(new ImmutableBytesPtr(tableName), table);
 
@@ -189,9 +189,9 @@ public class TestIndexWriter {
 
     // setup the writer
     TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
-    committer.setup(factory, exec, abort, stop, e );
+    committer.setup(factory, exec, stop, e );
     KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
-    policy.setup(stop, abort);
+    policy.setup(stop, e);
     final IndexWriter writer = new IndexWriter(committer, policy);
 
     final boolean[] failedWrite = new boolean[] { false };
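
The Mockito rework in this file follows from Table.batch(List, Object[]) being void in HBase 2.0: when(...).thenAnswer(...) no longer compiles against a void method, so the stubbing flips to doAnswer(...).when(...). A condensed sketch of the pattern:

    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.mockito.Mockito;
    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class BatchStubSketch {
        static Table mockBatchingTable(final boolean[] completed) throws Exception {
            Table table = Mockito.mock(Table.class);
            Mockito.doAnswer(new Answer<Void>() {
                @Override
                public Void answer(InvocationOnMock invocation) throws Throwable {
                    completed[0] = true; // record that the batch reached the table
                    return null;
                }
            }).when(table).batch(Mockito.anyListOf(Row.class), Mockito.any());
            return table;
        }
    }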

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index 1fe0342..cc185ce 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -67,15 +67,14 @@ public class TestParalleIndexWriter {
     FakeTableFactory factory = new FakeTableFactory(
         Collections.<ImmutableBytesPtr, Table> emptyMap());
     TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
-    Abortable mockAbort = Mockito.mock(Abortable.class);
     Stoppable mockStop = Mockito.mock(Stoppable.class);
     // create a simple writer
-    writer.setup(factory, exec, mockAbort, mockStop,e);
+    writer.setup(factory, exec, mockStop,e);
     // stop the writer
     writer.stop(this.test.getTableNameString() + " finished");
     assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
     assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
-    Mockito.verifyZeroInteractions(mockAbort, mockStop);
+    Mockito.verifyZeroInteractions(mockStop);
   }
 
   @SuppressWarnings({ "unchecked", "deprecation" })
@@ -102,7 +101,7 @@ public class TestParalleIndexWriter {
 
     Table table = Mockito.mock(Table.class);
     final boolean[] completed = new boolean[] { false };
-    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
+    Mockito.doAnswer(new Answer<Void>() {
 
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
@@ -110,14 +109,14 @@ public class TestParalleIndexWriter {
         completed[0] = true;
         return null;
       }
-    });
+    }).when(table).batch(Mockito.anyList(),Mockito.any());
     Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
     // add the table to the set of tables, so its returned to the writer
     tables.put(tableName, table);
 
     // setup the writer and failure policy
     TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
-    writer.setup(factory, exec, abort, stop, e);
+    writer.setup(factory, exec, stop, e);
     writer.write(indexUpdates, true);
     assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
       completed[0]);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 79bc295..d8dde82 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -29,7 +29,6 @@ import java.util.concurrent.Executors;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.hbase.index.IndexTableName;
-import org.apache.phoenix.hbase.index.StubAbortable;
 import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.junit.Rule;
@@ -63,27 +61,25 @@ public class TestParalleWriterIndexCommitter {
     FakeTableFactory factory = new FakeTableFactory(
         Collections.<ImmutableBytesPtr, Table> emptyMap());
     TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
-    Abortable mockAbort = Mockito.mock(Abortable.class);
     Stoppable mockStop = Mockito.mock(Stoppable.class);
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
     Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
     // create a simple writer
-    writer.setup(factory, exec, mockAbort, mockStop, e);
+    writer.setup(factory, exec, mockStop, e);
     // stop the writer
     writer.stop(this.test.getTableNameString() + " finished");
     assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
     assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
-    Mockito.verifyZeroInteractions(mockAbort, mockStop);
+    Mockito.verifyZeroInteractions(mockStop);
   }
 
-  @SuppressWarnings({ "unchecked", "deprecation" })
+  @SuppressWarnings({ "unchecked"})
   @Test
   public void testSynchronouslyCompletesAllWrites() throws Exception {
     LOG.info("Starting " + test.getTableNameString());
     LOG.info("Current thread is interrupted: " + Thread.interrupted());
-    Abortable abort = new StubAbortable();
     RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
     Configuration conf =new Configuration();
     Mockito.when(e.getConfiguration()).thenReturn(conf);
@@ -103,22 +99,21 @@ public class TestParalleWriterIndexCommitter {
 
     Table table = Mockito.mock(Table.class);
     final boolean[] completed = new boolean[] { false };
-    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
-
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        // just keep track that it was called
-        completed[0] = true;
-        return null;
-      }
-    });
-    Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
+        Mockito.doAnswer(new Answer<Void>() {
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                // just keep track that it was called
+                completed[0] = true;
+                return null;
+            }
+        }).when(table).batch(Mockito.anyList(), Mockito.any());
+        Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
     // add the table to the set of tables, so its returned to the writer
     tables.put(tableName, table);
 
     // setup the writer and failure policy
     TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
-    writer.setup(factory, exec, abort, stop, e);
+    writer.setup(factory, exec, stop, e);
     writer.write(indexUpdates, true);
     assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
       completed[0]);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 1ace4c5..f6dd85c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -47,9 +47,11 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -203,7 +205,7 @@ public class TestWALRecoveryCaching {
 
     // kill the server where the tables live - this should trigger distributed log splitting
     // find the regionserver that matches the passed server
-    List<Region> online = new ArrayList<Region>();
+    List<HRegion> online = new ArrayList<HRegion>();
     online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
       testTable.getTableName()));
     online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
index 0570826..8a2e2b7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
@@ -49,7 +49,7 @@ public class ConnectionQueryServicesImplTest {
         when(cqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
         doCallRealMethod().when(cqs).ensureSystemTablesMigratedToSystemNamespace(any(ReadOnlyProps.class));
         // Do nothing for this method, just check that it was invoked later
-        doNothing().when(cqs).createSysMutexTable(any(HBaseAdmin.class), any(ReadOnlyProps.class));
+        doNothing().when(cqs).createSysMutexTable(any(Admin.class), any(ReadOnlyProps.class));
 
         // Spoof out this call so that ensureSystemTablesUpgrade() will return-fast.
         when(cqs.getSystemTableNames(any(Admin.class))).thenReturn(Collections.<TableName> emptyList());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
index 177aff3..7ccaa8e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
@@ -28,7 +28,9 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -37,6 +39,7 @@ import org.apache.phoenix.schema.stats.StatisticsScanner.StatisticsScannerCallab
 import org.junit.Before;
 import org.junit.Test;
 
+
 /**
  * Test to verify that we don't try to update stats when a RS is stopping.
  */
@@ -53,6 +56,8 @@ public class StatisticsScannerTest {
     private RegionInfo regionInfo;
 
     private Configuration config;
+    private RegionCoprocessorEnvironment env;
+    private Connection conn;
 
     @Before
     public void setupMocks() throws Exception {
@@ -68,6 +73,8 @@ public class StatisticsScannerTest {
         this.tracker = mock(StatisticsCollector.class);
         this.delegate = mock(InternalScanner.class);
         this.regionInfo = mock(RegionInfo.class);
+        this.env = mock(RegionCoprocessorEnvironment.class);
+        this.conn = mock(Connection.class);
 
         // Wire up the mocks to the mock StatisticsScanner
         when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter);
@@ -77,6 +84,8 @@ public class StatisticsScannerTest {
         when(mockScanner.getConfig()).thenReturn(config);
         when(mockScanner.getTracker()).thenReturn(tracker);
         when(mockScanner.getDelegate()).thenReturn(delegate);
+        when(env.getConnection()).thenReturn(conn);
+        when(mockScanner.getConnection()).thenReturn(conn);
 
         // Wire up the HRegionInfo mock to the Region mock
         when(region.getRegionInfo()).thenReturn(regionInfo);
@@ -87,25 +96,25 @@ public class StatisticsScannerTest {
 
     @Test
     public void testCheckRegionServerStoppingOnClose() throws Exception {
-        when(rsServices.isStopping()).thenReturn(true);
-        when(rsServices.isStopped()).thenReturn(false);
+        when(conn.isClosed()).thenReturn(true);
+        when(conn.isAborted()).thenReturn(false);
 
         mockScanner.close();
 
-        verify(rsServices).isStopping();
+        verify(conn).isClosed();
         verify(callable, never()).call();
         verify(runTracker, never()).runTask(callable);
     }
 
     @Test
     public void testCheckRegionServerStoppedOnClose() throws Exception {
-        when(rsServices.isStopping()).thenReturn(false);
-        when(rsServices.isStopped()).thenReturn(true);
+        when(conn.isClosed()).thenReturn(false);
+        when(conn.isAborted()).thenReturn(true);
 
         mockScanner.close();
 
-        verify(rsServices).isStopping();
-        verify(rsServices).isStopped();
+        verify(conn).isClosed();
+        verify(conn).isAborted();
         verify(callable, never()).call();
         verify(runTracker, never()).runTask(callable);
     }
@@ -116,13 +125,13 @@ public class StatisticsScannerTest {
         StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable();
         doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(Region.class), any(StatisticsCollector.class),
                 any(ImmutableBytesPtr.class), any(List.class));
-        when(rsServices.isStopping()).thenReturn(true);
-        when(rsServices.isStopped()).thenReturn(false);
+        when(conn.isClosed()).thenReturn(true);
+        when(conn.isAborted()).thenReturn(false);
 
         // Should not throw an exception
         realCallable.call();
 
-        verify(rsServices).isStopping();
+        verify(conn).isClosed();
     }
 
     @SuppressWarnings("unchecked")
@@ -131,13 +140,13 @@ public class StatisticsScannerTest {
         StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable();
         doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(Region.class), any(StatisticsCollector.class),
                 any(ImmutableBytesPtr.class), any(List.class));
-        when(rsServices.isStopping()).thenReturn(false);
-        when(rsServices.isStopped()).thenReturn(true);
+        when(conn.isClosed()).thenReturn(false);
+        when(conn.isAborted()).thenReturn(true);
 
         // Should not throw an exception
         realCallable.call();
 
-        verify(rsServices).isStopping();
-        verify(rsServices).isStopped();
+        verify(conn).isClosed();
+        verify(conn).isAborted();
     }
 }
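
The reworked assertions above imply the scanner's shutdown guard now keys off the coprocessor environment's shared Connection instead of RegionServerServices, which HBase 2.0 no longer exposes to coprocessors. A sketch of the check those mocks exercise, assuming the Connection-based guard:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Connection;

    public class StopCheckSketch {
        // conn.isClosed()/isAborted() stand in for the old isStopping()/isStopped()
        static void throwIfStopping(Connection conn) throws IOException {
            if (conn.isClosed() || conn.isAborted()) {
                throw new IOException("Not updating statistics; server is going down");
            }
        }
    }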