Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/10/07 11:23:44 UTC

[1/3] git commit: Rename StatisticsUtils to follow conventions and other misc cleanup

Repository: phoenix
Updated Branches:
  refs/heads/4.0 f21bf2a05 -> a60dc35ac


Rename StatisticsUtils to follow conventions and other misc cleanup


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4281a9cf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4281a9cf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4281a9cf

Branch: refs/heads/4.0
Commit: 4281a9cfd944f7812eaa6ce21ef76f2e8bbe2a1b
Parents: f21bf2a
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 7 01:06:23 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 7 01:08:19 2014 -0700

----------------------------------------------------------------------
 .../coprocessor/MetaDataEndpointImpl.java       |  44 ++++---
 .../phoenix/schema/stat/StatisticsTable.java    |   4 +-
 .../phoenix/schema/stat/StatisticsUtil.java     | 117 +++++++++++++++++
 .../phoenix/schema/stat/StatisticsUtils.java    | 131 -------------------
 .../java/org/apache/phoenix/query/BaseTest.java |   1 -
 .../phoenix/query/QueryServicesTestImpl.java    |   2 -
 6 files changed, 143 insertions(+), 156 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4281a9cf/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 4fe9a2b..2e6f0b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -61,8 +61,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -132,7 +130,7 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.stat.PTableStats;
-import org.apache.phoenix.schema.stat.StatisticsUtils;
+import org.apache.phoenix.schema.stat.StatisticsUtil;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
@@ -272,8 +270,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
 
     private RegionCoprocessorEnvironment env;
-         
-    private static final Log LOG = LogFactory.getLog(MetaDataEndpointImpl.class);
 
     /**
      * Stores a reference to the coprocessor environment provided by the
@@ -292,7 +288,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         } else {
             throw new CoprocessorException("Must be loaded on a table region!");
         }
-        LOG.info("Starting Tracing-Metrics Systems");
+        logger.info("Starting Tracing-Metrics Systems");
         // Start the phoenix trace collection
         Tracing.addTraceMetricsSource();
         Metrics.ensureConfigured();
@@ -398,20 +394,20 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 return table;
             }
             // Query for the latest table first, since it's not cached
-            table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientTimeStamp);
+            table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
             if (table != null && table.getTimeStamp() < clientTimeStamp) {
                 return table;
             }
             // Otherwise, query for an older version of the table - it won't be cached
-            return buildTable(key, cacheKey, region, clientTimeStamp, clientTimeStamp);
+            return buildTable(key, cacheKey, region, clientTimeStamp);
         } finally {
             rowLock.release();
         }
     }
 
     private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
-            long buildAsOfTimeStamp, long clientTimeStamp) throws IOException, SQLException {
-        Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, buildAsOfTimeStamp);
+            long clientTimeStamp) throws IOException, SQLException {
+        Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
         RegionScanner scanner = region.getScanner(scan);
 
         Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
@@ -419,7 +415,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             PTable oldTable = metaDataCache.getIfPresent(cacheKey);
             long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP-1 : oldTable.getTimeStamp();
             PTable newTable;
-            newTable = getTable(scanner, buildAsOfTimeStamp, tableTimeStamp, clientTimeStamp);
+            newTable = getTable(scanner, clientTimeStamp, tableTimeStamp);
             if (newTable == null) {
                 return null;
             }
@@ -517,7 +513,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         columns.add(column);
     }
     
-    private PTable getTable(RegionScanner scanner, long buildAsOfTimeStamp, long tableTimeStamp, long clientTimeStamp)
+    private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableTimeStamp)
         throws IOException, SQLException {
         List<Cell> results = Lists.newArrayList();
         scanner.next(results);
@@ -652,7 +648,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
           if (colName.getString().isEmpty() && famName != null) {
               LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
               if (linkType == LinkType.INDEX_TABLE) {
-                  addIndexToTable(tenantId, schemaName, famName, tableName, buildAsOfTimeStamp, indexes);
+                  addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
               } else if (linkType == LinkType.PHYSICAL_TABLE) {
                   physicalTables.add(famName);
               } else {
@@ -664,10 +660,18 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
         PName physicalTableName = physicalTables.isEmpty() ? PNameFactory.newName(SchemaUtil.getTableName(
                 schemaName.getString(), tableName.getString())) : physicalTables.get(0);
-        PTableStats stats = tenantId == null ? StatisticsUtils.readStatistics(
-                ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME),
-                physicalTableName.getBytes(), 
-                clientTimeStamp) : null;
+        PTableStats stats = null;
+        if (tenantId == null) {
+            HTableInterface statsHTable = null;
+            try {
+                statsHTable = ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
+                stats = StatisticsUtil.readStatistics(statsHTable, physicalTableName.getBytes(), clientTimeStamp);
+            } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
+                logger.warn(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " not online yet?");
+            } finally {
+                if (statsHTable != null) statsHTable.close();
+            }
+        }
         return PTableImpl.makePTable(tenantId, schemaName, tableName, tableType, indexState, timeStamp, 
             tableSeqNum, pkName, saltBucketNum, columns, tableType == INDEX ? dataTableName : null, 
             indexes, isImmutableRows, physicalTables, defaultFamilyName, viewStatement, disableWAL, 
@@ -715,7 +719,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
         PTable table = metaDataCache.getIfPresent(cacheKey);
         // We always cache the latest version - fault in if not in cache
-        if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp, clientTimeStamp)) != null) {
+        if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp)) != null) {
             return table;
         }
         // if not found then check if newer table already exists and add delete marker for timestamp
@@ -1015,7 +1019,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
        
         // We always cache the latest version - fault in if not in cache
         if (table != null
-                || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientTimeStamp)) != null) {
+                || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) != null) {
             if (table.getTimeStamp() < clientTimeStamp) {
                 if (isTableDeleted(table) || tableType != table.getType()) {
                     return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
@@ -1172,7 +1176,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // Get client timeStamp from mutations
                 long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 if (table == null
-                        && (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientTimeStamp)) == null) {
+                        && (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) == null) {
                     // if not found then call newerTableExists and add delete marker for timestamp
                     // found
                     if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) {

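A note on the hunk above: the caller now owns the SYSTEM.STATS HTable lifecycle instead of the utility (compare the deleted try/catch/finally in StatisticsUtils.readStatistics further down). A minimal sketch of the resulting pattern, wrapped in a hypothetical helper — readStatsSafely is not in the commit; env and the other names are taken from the diff:

    private PTableStats readStatsSafely(RegionCoprocessorEnvironment env,
            byte[] physicalTableName, long clientTimeStamp) throws IOException {
        HTableInterface statsHTable = null;
        try {
            statsHTable = ServerUtil.getHTableForCoprocessorScan(env,
                    PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
            return StatisticsUtil.readStatistics(statsHTable, physicalTableName, clientTimeStamp);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            // SYSTEM.STATS may not be online yet, e.g. while the cluster bootstraps
            return null; // caller treats missing stats as "no stats"
        } finally {
            if (statsHTable != null) statsHTable.close(); // always release the table
        }
    }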
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4281a9cf/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
index fba8df7..3c0f376 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
@@ -101,7 +101,7 @@ public class StatisticsTable implements Closeable {
     public void addStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations) throws IOException {
         if (tracker == null) { return; }
 
-        byte[] prefix = StatisticsUtils.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
+        byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
                 PDataType.VARCHAR.toBytes(regionName));
         Put put = new Put(prefix);
         if (tracker.getGuidePosts(fam) != null) {
@@ -160,7 +160,7 @@ public class StatisticsTable implements Closeable {
     
     public void deleteStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations)
             throws IOException {
-        byte[] prefix = StatisticsUtils.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
+        byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
                 PDataType.VARCHAR.toBytes(regionName));
         mutations.add(new Delete(prefix, clientTimeStamp - 1));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4281a9cf/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java
new file mode 100644
index 0000000..c48d62f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.stat;
+import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.PhoenixArray;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
+
+import com.google.common.collect.Lists;
+/**
+ * Simple utility class for managing multiple key parts of the statistic
+ */
+public class StatisticsUtil {
+    private StatisticsUtil() {
+        // private ctor for utility classes
+    }
+
+    /** Number of parts in our complex key */
+    protected static final int NUM_KEY_PARTS = 3;
+
+    public static byte[] getRowKey(byte[] table, byte[] fam, byte[] region) {
+        // always starts with the source table
+        byte[] rowKey = new byte[table.length + fam.length + region.length + 2];
+        int offset = 0;
+        System.arraycopy(table, 0, rowKey, offset, table.length);
+        offset += table.length;
+        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
+        System.arraycopy(fam, 0, rowKey, offset, fam.length);
+        offset += fam.length;
+        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
+        System.arraycopy(region, 0, rowKey, offset, region.length);
+        return rowKey;
+    }
+    
+    public static byte[] copyRow(KeyValue kv) {
+        return Arrays.copyOfRange(kv.getRowArray(), kv.getRowOffset(), kv.getRowOffset() + kv.getRowLength());
+    }
+
+    public static PTableStats readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, long clientTimeStamp) throws IOException {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        Scan s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp);
+        s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
+        ResultScanner scanner = statsHTable.getScanner(s);
+        Result result = null;
+        TreeMap<byte[], List<byte[]>> guidePostsPerCf = new TreeMap<byte[], List<byte[]>>(Bytes.BYTES_COMPARATOR);
+        while ((result = scanner.next()) != null) {
+            CellScanner cellScanner = result.cellScanner();
+            while (cellScanner.advance()) {
+                Cell current = cellScanner.current();
+                int tableNameLength = tableNameBytes.length + 1;
+                int cfOffset = current.getRowOffset() + tableNameLength;
+                int cfLength = getVarCharLength(current.getRowArray(), cfOffset, current.getRowLength() - tableNameLength);
+                ptr.set(current.getRowArray(), cfOffset, cfLength);
+                byte[] cfName = ByteUtil.copyKeyBytesIfNecessary(ptr);
+                PhoenixArray array = (PhoenixArray)PDataType.VARBINARY_ARRAY.toObject(current.getValueArray(), current.getValueOffset(), current
+                        .getValueLength());
+                if (array != null && array.getDimensions() != 0) {
+                    List<byte[]> guidePosts = Lists.newArrayListWithExpectedSize(array.getDimensions());                        
+                    for (int j = 0; j < array.getDimensions(); j++) {
+                        byte[] gp = array.toBytes(j);
+                        if (gp.length != 0) {
+                            guidePosts.add(gp);
+                        }
+                    }
+                    List<byte[]> gps = guidePostsPerCf.put(cfName, guidePosts);
+                    if (gps != null) { // Add guidepost already there from other regions
+                        guidePosts.addAll(gps);
+                    }
+                }
+            }
+        }
+        if (!guidePostsPerCf.isEmpty()) {
+            // Sort guideposts, as the order above will depend on the order we traverse
+            // each region's worth of guideposts above.
+            for (List<byte[]> gps : guidePostsPerCf.values()) {
+                Collections.sort(gps, Bytes.BYTES_COMPARATOR);
+            }
+            return new PTableStatsImpl(guidePostsPerCf);
+        }
+        return PTableStatsImpl.NO_STATS;
+    }
+}
\ No newline at end of file

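The new StatisticsUtil.getRowKey composes the SYSTEM.STATS row key from three variable-length parts joined by QueryConstants.SEPARATOR_BYTE (a single zero byte), which is why the buffer is sized table.length + fam.length + region.length + 2. A small usage sketch — the table, family, and region names are made up; the VARCHAR encoding matches the call sites in StatisticsTable above:

    byte[] key = StatisticsUtil.getRowKey(
            Bytes.toBytes("MY_TABLE"),              // source table
            PDataType.VARCHAR.toBytes("0"),         // column family
            PDataType.VARCHAR.toBytes("region-1")); // region name
    // Resulting layout: MY_TABLE \x00 0 \x00 region-1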
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4281a9cf/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java
deleted file mode 100644
index b45dfbf..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.schema.stat;
-import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.coprocessor.MetaDataProtocol;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PDataType;
-import org.apache.phoenix.schema.PhoenixArray;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.MetaDataUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-/**
- * Simple utility class for managing multiple key parts of the statistic
- */
-public class StatisticsUtils {
-    private static final Logger logger = LoggerFactory.getLogger(StatisticsUtils.class);
-
-    private StatisticsUtils() {
-        // private ctor for utility classes
-    }
-
-    /** Number of parts in our complex key */
-    protected static final int NUM_KEY_PARTS = 3;
-
-    public static byte[] getRowKey(byte[] table, byte[] fam, byte[] region) {
-        // always starts with the source table
-        byte[] rowKey = new byte[table.length + fam.length + region.length + 2];
-        int offset = 0;
-        System.arraycopy(table, 0, rowKey, offset, table.length);
-        offset += table.length;
-        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
-        System.arraycopy(fam, 0, rowKey, offset, fam.length);
-        offset += fam.length;
-        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
-        System.arraycopy(region, 0, rowKey, offset, region.length);
-        return rowKey;
-    }
-    
-    public static byte[] copyRow(KeyValue kv) {
-        return Arrays.copyOfRange(kv.getRowArray(), kv.getRowOffset(), kv.getRowOffset() + kv.getRowLength());
-    }
-
-    public static PTableStats readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, long clientTimeStamp) throws IOException {
-        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-        try {
-            Scan s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp);
-            s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
-            ResultScanner scanner = statsHTable.getScanner(s);
-            Result result = null;
-            TreeMap<byte[], List<byte[]>> guidePostsPerCf = new TreeMap<byte[], List<byte[]>>(Bytes.BYTES_COMPARATOR);
-            while ((result = scanner.next()) != null) {
-                CellScanner cellScanner = result.cellScanner();
-                while (cellScanner.advance()) {
-                    Cell current = cellScanner.current();
-                    int tableNameLength = tableNameBytes.length + 1;
-                    int cfOffset = current.getRowOffset() + tableNameLength;
-                    int cfLength = getVarCharLength(current.getRowArray(), cfOffset, current.getRowLength() - tableNameLength);
-                    ptr.set(current.getRowArray(), cfOffset, cfLength);
-                    byte[] cfName = ByteUtil.copyKeyBytesIfNecessary(ptr);
-                    PhoenixArray array = (PhoenixArray)PDataType.VARBINARY_ARRAY.toObject(current.getValueArray(), current.getValueOffset(), current
-                            .getValueLength());
-                    if (array != null && array.getDimensions() != 0) {
-                        List<byte[]> guidePosts = Lists.newArrayListWithExpectedSize(array.getDimensions());                        
-                        for (int j = 0; j < array.getDimensions(); j++) {
-                            byte[] gp = array.toBytes(j);
-                            if (gp.length != 0) {
-                                guidePosts.add(gp);
-                            }
-                        }
-                        List<byte[]> gps = guidePostsPerCf.put(cfName, guidePosts);
-                        if (gps != null) { // Add guidepost already there from other regions
-                            guidePosts.addAll(gps);
-                        }
-                    }
-                }
-            }
-            if (!guidePostsPerCf.isEmpty()) {
-                // Sort guideposts, as the order above will depend on the order we traverse
-                // each region's worth of guideposts above.
-                for (List<byte[]> gps : guidePostsPerCf.values()) {
-                    Collections.sort(gps, Bytes.BYTES_COMPARATOR);
-                }
-                return new PTableStatsImpl(guidePostsPerCf);
-            }
-        } catch (Exception e) {
-            if (e instanceof org.apache.hadoop.hbase.TableNotFoundException) {
-                logger.warn("Stats table not yet online", e);
-            } else {
-                throw new IOException(e);
-            }
-        } finally {
-            statsHTable.close();
-        }
-        return PTableStatsImpl.NO_STATS;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4281a9cf/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 96257f6..653c90d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -599,7 +599,6 @@ public abstract class BaseTest {
         conf.setInt("hbase.hlog.asyncer.number", 2);
         conf.setInt("hbase.assignment.zkevent.workers", 5);
         conf.setInt("hbase.assignment.threads.max", 5);
-        conf.setInt(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 20);
         return conf;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4281a9cf/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index 3151588..a7d34da 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -52,7 +52,6 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
     public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE =  1024L*1024L*4L; // 4 Mb
     public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE =  1024L*1024L*2L; // 2 Mb
     public static final long DEFAULT_STATS_HISTOGRAM_DEPTH_BYTES = 2000;
-    public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 10000;
     public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0;
 
     
@@ -62,7 +61,6 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
     
     private static QueryServicesOptions getDefaultServicesOptions() {
     	return withDefaults()
-    	        .setStatsUpdateFrequencyMs(DEFAULT_STATS_UPDATE_FREQ_MS)
                 .setMinStatsUpdateFrequencyMs(DEFAULT_MIN_STATS_UPDATE_FREQ_MS)
     	        .setStatsHistogramDepthBytes(DEFAULT_STATS_HISTOGRAM_DEPTH_BYTES)
                 .setThreadPoolSize(DEFAULT_THREAD_POOL_SIZE)


[2/3] git commit: Atomically increment table timestamp and fix unit tests

Posted by ja...@apache.org.
Atomically increment table timestamp and fix unit tests

Conflicts:
	phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
	phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
	phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
	phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/450400f4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/450400f4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/450400f4

Branch: refs/heads/4.0
Commit: 450400f47c73e3ea80e6c4635ed044be8d7722b6
Parents: 4281a9c
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 6 19:47:29 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 7 01:43:25 2014 -0700

----------------------------------------------------------------------
 .../end2end/BaseTenantSpecificTablesIT.java     |   2 +-
 .../end2end/TenantSpecificTablesDMLIT.java      |  18 +-
 .../coprocessor/MetaDataEndpointImpl.java       | 193 +++++-----
 .../coprocessor/generated/MetaDataProtos.java   | 351 ++++++++++---------
 .../phoenix/query/ConnectionQueryServices.java  |   2 +-
 .../query/ConnectionQueryServicesImpl.java      |  17 +-
 .../query/ConnectionlessQueryServicesImpl.java  |   2 +-
 .../query/DelegateConnectionQueryServices.java  |   4 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  18 +-
 phoenix-protocol/src/main/MetaDataService.proto |   8 +-
 10 files changed, 320 insertions(+), 295 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
index b8fa035..fe08ac5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
@@ -84,7 +84,7 @@ public abstract class BaseTenantSpecificTablesIT extends BaseClientManagedTimeIT
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20l));
+        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
         PHOENIX_JDBC_TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID;
         PHOENIX_JDBC_TENANT_SPECIFIC_URL2 = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID2;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
index b01c0ab..21f80f9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
@@ -53,6 +53,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.close();
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             analyzeTable(conn, TENANT_TABLE_NAME);
+            conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             ResultSet rs = conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME + " where id = 1");
             assertTrue("Expected 1 row in result set", rs.next());
             assertEquals("Cheap Sunglasses", rs.getString(1));
@@ -75,6 +76,8 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn1.commit();
             conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             analyzeTable(conn1, TENANT_TABLE_NAME);
+            conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+            
             conn2.setAutoCommit(true);
             conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('them','" + TENANT_TYPE_ID + "',1,'Long Hair')");
             conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('us','" + TENANT_TYPE_ID + "',2,'Black Hat')");
@@ -88,7 +91,8 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             assertFalse("Expected 1 row in result set", rs.next());
             conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
             analyzeTable(conn2, TENANT_TABLE_NAME);
-            rs = conn2.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME );
+            conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
+
             rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME + " where id = 2");
             assertTrue("Expected 1 row in result set", rs.next());
             assertEquals(2, rs.getInt(3));
@@ -143,6 +147,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             assertTrue("Expected 1 row in result set", rs.next());
             assertEquals(2, rs.getInt(3));
             assertEquals("Viva Las Vegas", rs.getString(4));
+            
             List<KeyRange> splits = getAllSplits(conn1, TENANT_TABLE_NAME);
             assertEquals(3, splits.size());
         }
@@ -179,6 +184,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             analyzeTable(conn, TENANT_TABLE_NAME);
+            conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             ResultSet rs = conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME + " join foo on k=id");
             assertTrue("Expected 1 row in result set", rs.next());
             assertEquals("Cheap Sunglasses", rs.getString(1));
@@ -250,6 +256,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             
             conn = nextConnection(getUrl());
             analyzeTable(conn, PARENT_TABLE_NAME);
+            conn = nextConnection(getUrl());
             rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME);
             rs.next();
             assertEquals(2, rs.getInt(1));
@@ -311,6 +318,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             analyzeTable(conn, PARENT_TABLE_NAME);
+            conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             conn.createStatement().execute("delete from " + TENANT_TABLE_NAME);
             conn.commit();
             conn.close();
@@ -346,6 +354,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             
             conn = nextConnection(getUrl());
             analyzeTable(conn, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
+            conn = nextConnection(getUrl());
             ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
             rs.next();
             assertEquals(3, rs.getInt(1));
@@ -371,8 +380,9 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
-            conn.setAutoCommit(true);
             analyzeTable(conn, TENANT_TABLE_NAME);
+            conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+            conn.setAutoCommit(true);
             int count = conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + "(id, user) select id+100, user from " + TENANT_TABLE_NAME);
             assertEquals("Expected 1 row to have been inserted", 1, count);
             conn.close();
@@ -406,8 +416,9 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
-            conn.setAutoCommit(true);
             analyzeTable(conn, TENANT_TABLE_NAME);
+            conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+            conn.setAutoCommit(true);
             int count = conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + "(id, user) select id+100, user from ANOTHER_TENANT_TABLE where id=2");
             assertEquals("Expected 1 row to have been inserted", 1, count);
             conn.close();
@@ -455,6 +466,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.close();
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             analyzeTable(conn, PARENT_TABLE_NAME);
+            conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             rs = conn.createStatement().executeQuery("select user from " + PARENT_TABLE_NAME);
             assertTrue(rs.next());
             assertEquals(rs.getString(1),"Billy Gibbons");

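The recurring edit in the hunks above is the same one-liner: after analyzeTable(conn, ...) runs, the test opens a fresh connection before querying. Presumably, under the client-managed-time harness these tests use, each nextConnection call opens a connection at a later SCN, so statistics written by UPDATE STATISTICS at one timestamp only become visible to a subsequently opened connection. A sketch of the pattern, using the nextConnection and analyzeTable test helpers already shown above:

    Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
    analyzeTable(conn, TENANT_TABLE_NAME); // UPDATE STATISTICS at this connection's SCN
    conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL); // later SCN: sees the new stats
    ResultSet rs = conn.createStatement().executeQuery(
            "select tenant_col from " + TENANT_TABLE_NAME + " where id = 1");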
http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 2e6f0b4..09ca2dd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -92,8 +92,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.AddColumnRequest;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest;
@@ -102,6 +100,8 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.DropTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
@@ -144,7 +144,6 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.cache.Cache;
 import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
@@ -347,64 +346,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private PTable doGetTable(byte[] key, long clientTimeStamp) throws IOException, SQLException {
-        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
-        Cache<ImmutableBytesPtr, PTable> metaDataCache =
-                GlobalCache.getInstance(this.env).getMetaDataCache();
-        PTable table = metaDataCache.getIfPresent(cacheKey);
-        // We only cache the latest, so we'll end up building the table with every call if the
-        // client connection has specified an SCN.
-        // TODO: If we indicate to the client that we're returning an older version, but there's a
-        // newer version available, the client
-        // can safely not call this, since we only allow modifications to the latest.
-        if (table != null && table.getTimeStamp() < clientTimeStamp) {
-            // Table on client is up-to-date with table on server, so just return
-            if (isTableDeleted(table)) {
-                return null;
-            }
-            return table;
-        }
-        // Ask Lars about the expense of this call - if we don't take the lock, we still won't get
-        // partial results
-        // get the co-processor environment
-        // TODO: check that key is within region.getStartKey() and region.getEndKey()
-        // and return special code to force client to lookup region from meta.
-        HRegion region = env.getRegion();
-        /*
-         * Lock directly on key, though it may be an index table. This will just prevent a table
-         * from getting rebuilt too often.
-         */
-        RowLock rowLock = region.getRowLock(key);
-        if (rowLock == null) {
-            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
-        }
-        try {
-            // Try cache again in case we were waiting on a lock
-            table = metaDataCache.getIfPresent(cacheKey);
-            // We only cache the latest, so we'll end up building the table with every call if the
-            // client connection has specified an SCN.
-            // TODO: If we indicate to the client that we're returning an older version, but there's
-            // a newer version available, the client
-            // can safely not call this, since we only allow modifications to the latest.
-            if (table != null && table.getTimeStamp() < clientTimeStamp) {
-                // Table on client is up-to-date with table on server, so just return
-                if (isTableDeleted(table)) {
-                    return null;
-                }
-                return table;
-            }
-            // Query for the latest table first, since it's not cached
-            table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
-            if (table != null && table.getTimeStamp() < clientTimeStamp) {
-                return table;
-            }
-            // Otherwise, query for an older version of the table - it won't be cached
-            return buildTable(key, cacheKey, region, clientTimeStamp);
-        } finally {
-            rowLock.release();
-        }
-    }
-
     private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
             long clientTimeStamp) throws IOException, SQLException {
         Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
@@ -1322,6 +1263,97 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
     
+    private PTable incrementTableTimestamp(byte[] key, long clientTimeStamp) throws IOException, SQLException {
+        HRegion region = env.getRegion();
+        RowLock lid = region.getRowLock(key);
+        if (lid == null) {
+            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
+        }
+        try {
+            PTable table = doGetTable(key, clientTimeStamp, lid);
+            if (table != null) {
+                long tableTimeStamp = table.getTimeStamp() + 1;
+                List<Mutation> mutations = Lists.newArrayListWithExpectedSize(1);
+                Put p = new Put(key);
+                p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, tableTimeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
+                mutations.add(p);
+                region.mutateRowsWithLocks(mutations, Collections.<byte[]> emptySet());
+                
+                Cache<ImmutableBytesPtr, PTable> metaDataCache = GlobalCache.getInstance(env).getMetaDataCache();
+                ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
+                metaDataCache.invalidate(cacheKey);
+            }
+            return table;
+        } finally {
+            lid.release();
+        }
+    }
+    
+    private PTable doGetTable(byte[] key, long clientTimeStamp) throws IOException, SQLException {
+        return doGetTable(key, clientTimeStamp, null);
+    }
+    
+    private PTable doGetTable(byte[] key, long clientTimeStamp, RowLock rowLock) throws IOException, SQLException {
+        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
+        Cache<ImmutableBytesPtr, PTable> metaDataCache =
+                GlobalCache.getInstance(this.env).getMetaDataCache();
+        PTable table = metaDataCache.getIfPresent(cacheKey);
+        // We only cache the latest, so we'll end up building the table with every call if the
+        // client connection has specified an SCN.
+        // TODO: If we indicate to the client that we're returning an older version, but there's a
+        // newer version available, the client
+        // can safely not call this, since we only allow modifications to the latest.
+        if (table != null && table.getTimeStamp() < clientTimeStamp) {
+            // Table on client is up-to-date with table on server, so just return
+            if (isTableDeleted(table)) {
+                return null;
+            }
+            return table;
+        }
+        // Ask Lars about the expense of this call - if we don't take the lock, we still won't get
+        // partial results
+        // get the co-processor environment
+        // TODO: check that key is within region.getStartKey() and region.getEndKey()
+        // and return special code to force client to lookup region from meta.
+        HRegion region = env.getRegion();
+        /*
+         * Lock directly on key, though it may be an index table. This will just prevent a table
+         * from getting rebuilt too often.
+         */
+        final boolean wasLocked = (rowLock != null);
+        if (!wasLocked) {
+            rowLock = region.getRowLock(key);
+            if (rowLock == null) {
+                throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
+            }
+        }
+        try {
+            // Try cache again in case we were waiting on a lock
+            table = metaDataCache.getIfPresent(cacheKey);
+            // We only cache the latest, so we'll end up building the table with every call if the
+            // client connection has specified an SCN.
+            // TODO: If we indicate to the client that we're returning an older version, but there's
+            // a newer version available, the client
+            // can safely not call this, since we only allow modifications to the latest.
+            if (table != null && table.getTimeStamp() < clientTimeStamp) {
+                // Table on client is up-to-date with table on server, so just return
+                if (isTableDeleted(table)) {
+                    return null;
+                }
+                return table;
+            }
+            // Query for the latest table first, since it's not cached
+            table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
+            if (table != null && table.getTimeStamp() < clientTimeStamp) {
+                return table;
+            }
+            // Otherwise, query for an older version of the table - it won't be cached
+            return buildTable(key, cacheKey, region, clientTimeStamp);
+        } finally {
+            if (!wasLocked) rowLock.release();
+        }
+    }
+
     @Override
     public void dropColumn(RpcController controller, DropColumnRequest request,
             RpcCallback<MetaDataResponse> done) {
@@ -1672,36 +1704,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
     
     @Override
-    public void clearCacheForTable(RpcController controller, ClearCacheForTableRequest request,
-            RpcCallback<ClearCacheForTableResponse> done) {
-        ByteString tenantId = request.getTenantId();
-        ByteString schemaName = request.getSchemaName();
-        ByteString tableName = request.getTableName();
-        byte[] tableKey = SchemaUtil.getTableKey(tenantId.toByteArray(), schemaName.toByteArray(),
-                tableName.toByteArray());
-        ImmutableBytesPtr key = new ImmutableBytesPtr(tableKey);
+    public void incrementTableTimeStamp(RpcController controller, IncrementTableTimeStampRequest request,
+            RpcCallback<IncrementTableTimeStampResponse> done) {
+        byte[] schemaName = request.getSchemaName().toByteArray();
+        byte[] tableName = request.getTableName().toByteArray();
         try {
-            PTable table = doGetTable(tableKey, request.getClientTimestamp());
-            if (table != null) {
-                Cache<ImmutableBytesPtr, PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
-                // Add +1 to the ts
-                // TODO : refactor doGetTable() to do this - optionally increment the timestamp
-                // TODO : clear metadata if it is spread across multiple region servers
-                long ts = table.getTimeStamp() + 1;
-                // Here we could add an empty puti
-                HRegion region = env.getRegion();
-                List<Mutation> mutations = new ArrayList<Mutation>();
-                Put p = new Put(tableKey);
-                p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ts, ByteUtil.EMPTY_BYTE_ARRAY);
-                mutations.add(p);
-                region.mutateRowsWithLocks(mutations, Collections.<byte[]> emptySet());
-                metaDataCache.invalidate(key);
-            }
+            byte[] tenantId = request.getTenantId().toByteArray();
+            long clientTimeStamp = request.getClientTimestamp();
+            byte[] tableKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+            incrementTableTimestamp(tableKey, clientTimeStamp);
         } catch (Throwable t) {
-            // We could still invalidate it
-            logger.error("clearCacheForTable failed to update the latest ts ", t);
-            ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(
-                    SchemaUtil.getTableName(schemaName.toString(), tableName.toString()), t));
+            logger.error("incrementTableTimeStamp failed", t);
+            ProtobufUtil.setControllerException(controller,
+                ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
         }
     }
 }

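Two things worth noting in the hunk above. First, doGetTable gains an optional RowLock parameter so that incrementTableTimestamp, which already holds the row lock, can reuse it; the wasLocked flag ensures doGetTable only releases a lock it acquired itself, avoiding a double release. A condensed sketch of the two call shapes, elided to the locking logic:

    // Caller already owns the lock and passes it through:
    RowLock lid = region.getRowLock(key);
    if (lid == null) {
        throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
    }
    try {
        PTable table = doGetTable(key, clientTimeStamp, lid); // lock reused, not re-taken
        // ... bump the empty-column timestamp and invalidate the cache ...
    } finally {
        lid.release(); // released exactly once, by the owner
    }

    // Plain reads let doGetTable manage the lock internally:
    PTable table = doGetTable(key, clientTimeStamp); // same as doGetTable(key, clientTimeStamp, null)

Second, the write itself is an empty-column Put at table.getTimeStamp() + 1 applied via region.mutateRowsWithLocks while the row lock is held, which is what makes the timestamp bump atomic with respect to concurrent metadata updates.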
http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
index 5ac9cb7..8d153b2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
@@ -6316,7 +6316,7 @@ public final class MetaDataProtos {
     // @@protoc_insertion_point(class_scope:GetVersionResponse)
   }
 
-  public interface ClearCacheForTableRequestOrBuilder
+  public interface IncrementTableTimeStampRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
     // required bytes tenantId = 1;
@@ -6360,24 +6360,24 @@ public final class MetaDataProtos {
     long getClientTimestamp();
   }
   /**
-   * Protobuf type {@code ClearCacheForTableRequest}
+   * Protobuf type {@code IncrementTableTimeStampRequest}
    */
-  public static final class ClearCacheForTableRequest extends
+  public static final class IncrementTableTimeStampRequest extends
       com.google.protobuf.GeneratedMessage
-      implements ClearCacheForTableRequestOrBuilder {
-    // Use ClearCacheForTableRequest.newBuilder() to construct.
-    private ClearCacheForTableRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements IncrementTableTimeStampRequestOrBuilder {
+    // Use IncrementTableTimeStampRequest.newBuilder() to construct.
+    private IncrementTableTimeStampRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private ClearCacheForTableRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private IncrementTableTimeStampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final ClearCacheForTableRequest defaultInstance;
-    public static ClearCacheForTableRequest getDefaultInstance() {
+    private static final IncrementTableTimeStampRequest defaultInstance;
+    public static IncrementTableTimeStampRequest getDefaultInstance() {
       return defaultInstance;
     }
 
-    public ClearCacheForTableRequest getDefaultInstanceForType() {
+    public IncrementTableTimeStampRequest getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -6387,7 +6387,7 @@ public final class MetaDataProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private ClearCacheForTableRequest(
+    private IncrementTableTimeStampRequest(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -6444,28 +6444,28 @@ public final class MetaDataProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableRequest_descriptor;
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableRequest_fieldAccessorTable
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.Builder.class);
+              org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<ClearCacheForTableRequest> PARSER =
-        new com.google.protobuf.AbstractParser<ClearCacheForTableRequest>() {
-      public ClearCacheForTableRequest parsePartialFrom(
+    public static com.google.protobuf.Parser<IncrementTableTimeStampRequest> PARSER =
+        new com.google.protobuf.AbstractParser<IncrementTableTimeStampRequest>() {
+      public IncrementTableTimeStampRequest parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ClearCacheForTableRequest(input, extensionRegistry);
+        return new IncrementTableTimeStampRequest(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<ClearCacheForTableRequest> getParserForType() {
+    public com.google.protobuf.Parser<IncrementTableTimeStampRequest> getParserForType() {
       return PARSER;
     }
 
@@ -6622,10 +6622,10 @@ public final class MetaDataProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest)) {
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)) {
         return super.equals(obj);
       }
-      org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest) obj;
+      org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest) obj;
 
       boolean result = true;
       result = result && (hasTenantId() == other.hasTenantId());
@@ -6682,53 +6682,53 @@ public final class MetaDataProtos {
       return hash;
     }
 
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(byte[] data)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseDelimitedFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -6737,7 +6737,7 @@ public final class MetaDataProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest prototype) {
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -6749,24 +6749,24 @@ public final class MetaDataProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code ClearCacheForTableRequest}
+     * Protobuf type {@code IncrementTableTimeStampRequest}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequestOrBuilder {
+       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequestOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableRequest_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableRequest_fieldAccessorTable
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.Builder.class);
+                org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.Builder.class);
       }
 
-      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.newBuilder()
+      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -6803,23 +6803,23 @@ public final class MetaDataProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableRequest_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_descriptor;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest getDefaultInstanceForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.getDefaultInstance();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance();
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest build() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest result = buildPartial();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest build() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest buildPartial() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest(this);
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest buildPartial() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -6844,16 +6844,16 @@ public final class MetaDataProtos {
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest) {
-          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest)other);
+        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest other) {
-        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest other) {
+        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance()) return this;
         if (other.hasTenantId()) {
           setTenantId(other.getTenantId());
         }
@@ -6894,11 +6894,11 @@ public final class MetaDataProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest parsedMessage = null;
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -7050,39 +7050,39 @@ public final class MetaDataProtos {
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:ClearCacheForTableRequest)
+      // @@protoc_insertion_point(builder_scope:IncrementTableTimeStampRequest)
     }
 
     static {
-      defaultInstance = new ClearCacheForTableRequest(true);
+      defaultInstance = new IncrementTableTimeStampRequest(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:ClearCacheForTableRequest)
+    // @@protoc_insertion_point(class_scope:IncrementTableTimeStampRequest)
   }
 
-  public interface ClearCacheForTableResponseOrBuilder
+  public interface IncrementTableTimeStampResponseOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
   }
   /**
-   * Protobuf type {@code ClearCacheForTableResponse}
+   * Protobuf type {@code IncrementTableTimeStampResponse}
    */
-  public static final class ClearCacheForTableResponse extends
+  public static final class IncrementTableTimeStampResponse extends
       com.google.protobuf.GeneratedMessage
-      implements ClearCacheForTableResponseOrBuilder {
-    // Use ClearCacheForTableResponse.newBuilder() to construct.
-    private ClearCacheForTableResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements IncrementTableTimeStampResponseOrBuilder {
+    // Use IncrementTableTimeStampResponse.newBuilder() to construct.
+    private IncrementTableTimeStampResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private ClearCacheForTableResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private IncrementTableTimeStampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final ClearCacheForTableResponse defaultInstance;
-    public static ClearCacheForTableResponse getDefaultInstance() {
+    private static final IncrementTableTimeStampResponse defaultInstance;
+    public static IncrementTableTimeStampResponse getDefaultInstance() {
       return defaultInstance;
     }
 
-    public ClearCacheForTableResponse getDefaultInstanceForType() {
+    public IncrementTableTimeStampResponse getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -7092,7 +7092,7 @@ public final class MetaDataProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private ClearCacheForTableResponse(
+    private IncrementTableTimeStampResponse(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -7128,28 +7128,28 @@ public final class MetaDataProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableResponse_descriptor;
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableResponse_fieldAccessorTable
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.Builder.class);
+              org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<ClearCacheForTableResponse> PARSER =
-        new com.google.protobuf.AbstractParser<ClearCacheForTableResponse>() {
-      public ClearCacheForTableResponse parsePartialFrom(
+    public static com.google.protobuf.Parser<IncrementTableTimeStampResponse> PARSER =
+        new com.google.protobuf.AbstractParser<IncrementTableTimeStampResponse>() {
+      public IncrementTableTimeStampResponse parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ClearCacheForTableResponse(input, extensionRegistry);
+        return new IncrementTableTimeStampResponse(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<ClearCacheForTableResponse> getParserForType() {
+    public com.google.protobuf.Parser<IncrementTableTimeStampResponse> getParserForType() {
       return PARSER;
     }
 
@@ -7193,10 +7193,10 @@ public final class MetaDataProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse)) {
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse)) {
         return super.equals(obj);
       }
-      org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse) obj;
+      org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) obj;
 
       boolean result = true;
       result = result &&
@@ -7217,53 +7217,53 @@ public final class MetaDataProtos {
       return hash;
     }
 
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(byte[] data)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseDelimitedFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -7272,7 +7272,7 @@ public final class MetaDataProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse prototype) {
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -7284,24 +7284,24 @@ public final class MetaDataProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code ClearCacheForTableResponse}
+     * Protobuf type {@code IncrementTableTimeStampResponse}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponseOrBuilder {
+       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponseOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableResponse_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableResponse_fieldAccessorTable
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.Builder.class);
+                org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.Builder.class);
       }
 
-      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.newBuilder()
+      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -7330,38 +7330,38 @@ public final class MetaDataProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearCacheForTableResponse_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_descriptor;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse getDefaultInstanceForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance();
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse build() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse result = buildPartial();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse build() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse buildPartial() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse(this);
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse buildPartial() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse(this);
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse) {
-          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse)other);
+        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse other) {
-        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse other) {
+        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance()) return this;
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -7374,11 +7374,11 @@ public final class MetaDataProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse parsedMessage = null;
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -7388,15 +7388,15 @@ public final class MetaDataProtos {
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:ClearCacheForTableResponse)
+      // @@protoc_insertion_point(builder_scope:IncrementTableTimeStampResponse)
     }
 
     static {
-      defaultInstance = new ClearCacheForTableResponse(true);
+      defaultInstance = new IncrementTableTimeStampResponse(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:ClearCacheForTableResponse)
+    // @@protoc_insertion_point(class_scope:IncrementTableTimeStampResponse)
   }
 
   /**
@@ -7472,12 +7472,12 @@ public final class MetaDataProtos {
           com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse> done);
 
       /**
-       * <code>rpc clearCacheForTable(.ClearCacheForTableRequest) returns (.ClearCacheForTableResponse);</code>
+       * <code>rpc incrementTableTimeStamp(.IncrementTableTimeStampRequest) returns (.IncrementTableTimeStampResponse);</code>
        */
-      public abstract void clearCacheForTable(
+      public abstract void incrementTableTimeStamp(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest request,
-          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse> done);
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
+          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done);
 
     }
 
@@ -7549,11 +7549,11 @@ public final class MetaDataProtos {
         }
 
         @java.lang.Override
-        public  void clearCacheForTable(
+        public  void incrementTableTimeStamp(
             com.google.protobuf.RpcController controller,
-            org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest request,
-            com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse> done) {
-          impl.clearCacheForTable(controller, request, done);
+            org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
+            com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done) {
+          impl.incrementTableTimeStamp(controller, request, done);
         }
 
       };
@@ -7595,7 +7595,7 @@ public final class MetaDataProtos {
             case 7:
               return impl.getVersion(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest)request);
             case 8:
-              return impl.clearCacheForTable(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest)request);
+              return impl.incrementTableTimeStamp(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -7627,7 +7627,7 @@ public final class MetaDataProtos {
             case 7:
               return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest.getDefaultInstance();
             case 8:
-              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.getDefaultInstance();
+              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -7659,7 +7659,7 @@ public final class MetaDataProtos {
             case 7:
               return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse.getDefaultInstance();
             case 8:
-              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance();
+              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -7733,12 +7733,12 @@ public final class MetaDataProtos {
         com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse> done);
 
     /**
-     * <code>rpc clearCacheForTable(.ClearCacheForTableRequest) returns (.ClearCacheForTableResponse);</code>
+     * <code>rpc incrementTableTimeStamp(.IncrementTableTimeStampRequest) returns (.IncrementTableTimeStampResponse);</code>
      */
-    public abstract void clearCacheForTable(
+    public abstract void incrementTableTimeStamp(
         com.google.protobuf.RpcController controller,
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest request,
-        com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse> done);
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
+        com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done);
 
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
@@ -7803,8 +7803,8 @@ public final class MetaDataProtos {
               done));
           return;
         case 8:
-          this.clearCacheForTable(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest)request,
-            com.google.protobuf.RpcUtil.<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse>specializeCallback(
+          this.incrementTableTimeStamp(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse>specializeCallback(
               done));
           return;
         default:
@@ -7838,7 +7838,7 @@ public final class MetaDataProtos {
         case 7:
           return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest.getDefaultInstance();
         case 8:
-          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest.getDefaultInstance();
+          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -7870,7 +7870,7 @@ public final class MetaDataProtos {
         case 7:
           return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse.getDefaultInstance();
         case 8:
-          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance();
+          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -8012,19 +8012,19 @@ public final class MetaDataProtos {
             org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse.getDefaultInstance()));
       }
 
-      public  void clearCacheForTable(
+      public  void incrementTableTimeStamp(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest request,
-          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse> done) {
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
+          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done) {
         channel.callMethod(
           getDescriptor().getMethods().get(8),
           controller,
           request,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance(),
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance(),
           com.google.protobuf.RpcUtil.generalizeCallback(
             done,
-            org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.class,
-            org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance()));
+            org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.class,
+            org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance()));
       }
     }
 
@@ -8074,9 +8074,9 @@ public final class MetaDataProtos {
           org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest request)
           throws com.google.protobuf.ServiceException;
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse clearCacheForTable(
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse incrementTableTimeStamp(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest request)
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request)
           throws com.google.protobuf.ServiceException;
     }
 
@@ -8183,15 +8183,15 @@ public final class MetaDataProtos {
       }
 
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse clearCacheForTable(
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse incrementTableTimeStamp(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest request)
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse) channel.callBlockingMethod(
+        return (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) channel.callBlockingMethod(
           getDescriptor().getMethods().get(8),
           controller,
           request,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse.getDefaultInstance());
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance());
       }
 
     }
@@ -8255,15 +8255,15 @@ public final class MetaDataProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_GetVersionResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_ClearCacheForTableRequest_descriptor;
+    internal_static_IncrementTableTimeStampRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_ClearCacheForTableRequest_fieldAccessorTable;
+      internal_static_IncrementTableTimeStampRequest_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_ClearCacheForTableResponse_descriptor;
+    internal_static_IncrementTableTimeStampResponse_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_ClearCacheForTableResponse_fieldAccessorTable;
+      internal_static_IncrementTableTimeStampResponse_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -8291,32 +8291,33 @@ public final class MetaDataProtos {
       "st\022\036\n\026tableMetadataMutations\030\001 \003(\014\"\023\n\021Cl" +
       "earCacheRequest\"\024\n\022ClearCacheResponse\"\023\n" +
       "\021GetVersionRequest\"%\n\022GetVersionResponse" +
-      "\022\017\n\007version\030\001 \002(\003\"m\n\031ClearCacheForTableR" +
-      "equest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaName\030\002",
-      " \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTimesta" +
-      "mp\030\004 \002(\003\"\034\n\032ClearCacheForTableResponse*\212" +
-      "\002\n\014MutationCode\022\030\n\024TABLE_ALREADY_EXISTS\020" +
-      "\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLUMN_NOT_FOU" +
-      "ND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020\003\022\035\n\031CONCU" +
-      "RRENT_TABLE_MUTATION\020\004\022\027\n\023TABLE_NOT_IN_R" +
-      "EGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006\022\034\n\030UNALLO" +
-      "WED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_COLUMNS\020\010\022" +
-      "\032\n\026PARENT_TABLE_NOT_FOUND\020\t2\222\004\n\017MetaData" +
-      "Service\022/\n\010getTable\022\020.GetTableRequest\032\021.",
-      "MetaDataResponse\0225\n\013createTable\022\023.Create" +
-      "TableRequest\032\021.MetaDataResponse\0221\n\tdropT" +
-      "able\022\021.DropTableRequest\032\021.MetaDataRespon" +
-      "se\0221\n\taddColumn\022\021.AddColumnRequest\032\021.Met" +
-      "aDataResponse\0223\n\ndropColumn\022\022.DropColumn" +
-      "Request\032\021.MetaDataResponse\022?\n\020updateInde" +
-      "xState\022\030.UpdateIndexStateRequest\032\021.MetaD" +
-      "ataResponse\0225\n\nclearCache\022\022.ClearCacheRe" +
-      "quest\032\023.ClearCacheResponse\0225\n\ngetVersion" +
-      "\022\022.GetVersionRequest\032\023.GetVersionRespons",
-      "e\022M\n\022clearCacheForTable\022\032.ClearCacheForT" +
-      "ableRequest\032\033.ClearCacheForTableResponse" +
-      "BB\n(org.apache.phoenix.coprocessor.gener" +
-      "atedB\016MetaDataProtosH\001\210\001\001\240\001\001"
+      "\022\017\n\007version\030\001 \002(\003\"r\n\036IncrementTableTimeS" +
+      "tampRequest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaN",
+      "ame\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTi" +
+      "mestamp\030\004 \002(\003\"!\n\037IncrementTableTimeStamp" +
+      "Response*\212\002\n\014MutationCode\022\030\n\024TABLE_ALREA" +
+      "DY_EXISTS\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLU" +
+      "MN_NOT_FOUND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020" +
+      "\003\022\035\n\031CONCURRENT_TABLE_MUTATION\020\004\022\027\n\023TABL" +
+      "E_NOT_IN_REGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006" +
+      "\022\034\n\030UNALLOWED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_" +
+      "COLUMNS\020\010\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t2\241\004" +
+      "\n\017MetaDataService\022/\n\010getTable\022\020.GetTable",
+      "Request\032\021.MetaDataResponse\0225\n\013createTabl" +
+      "e\022\023.CreateTableRequest\032\021.MetaDataRespons" +
+      "e\0221\n\tdropTable\022\021.DropTableRequest\032\021.Meta" +
+      "DataResponse\0221\n\taddColumn\022\021.AddColumnReq" +
+      "uest\032\021.MetaDataResponse\0223\n\ndropColumn\022\022." +
+      "DropColumnRequest\032\021.MetaDataResponse\022?\n\020" +
+      "updateIndexState\022\030.UpdateIndexStateReque" +
+      "st\032\021.MetaDataResponse\0225\n\nclearCache\022\022.Cl" +
+      "earCacheRequest\032\023.ClearCacheResponse\0225\n\n" +
+      "getVersion\022\022.GetVersionRequest\032\023.GetVers",
+      "ionResponse\022\\\n\027incrementTableTimeStamp\022\037" +
+      ".IncrementTableTimeStampRequest\032 .Increm" +
+      "entTableTimeStampResponseBB\n(org.apache." +
+      "phoenix.coprocessor.generatedB\016MetaDataP" +
+      "rotosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -8389,17 +8390,17 @@ public final class MetaDataProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_GetVersionResponse_descriptor,
               new java.lang.String[] { "Version", });
-          internal_static_ClearCacheForTableRequest_descriptor =
+          internal_static_IncrementTableTimeStampRequest_descriptor =
             getDescriptor().getMessageTypes().get(11);
-          internal_static_ClearCacheForTableRequest_fieldAccessorTable = new
+          internal_static_IncrementTableTimeStampRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_ClearCacheForTableRequest_descriptor,
+              internal_static_IncrementTableTimeStampRequest_descriptor,
               new java.lang.String[] { "TenantId", "SchemaName", "TableName", "ClientTimestamp", });
-          internal_static_ClearCacheForTableResponse_descriptor =
+          internal_static_IncrementTableTimeStampResponse_descriptor =
             getDescriptor().getMessageTypes().get(12);
-          internal_static_ClearCacheForTableResponse_fieldAccessorTable = new
+          internal_static_IncrementTableTimeStampResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_ClearCacheForTableResponse_descriptor,
+              internal_static_IncrementTableTimeStampResponse_descriptor,
               new java.lang.String[] { });
           return null;
         }

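Everything above is protoc output: the rename flows mechanically through the static PARSER, the full family of parseFrom overloads, the Builder, and the value-based equals/hashCode. A minimal round-trip sketch of the renamed request (not part of the commit; field values are hypothetical):

    import com.google.protobuf.ByteString;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest;

    public class IncrementRequestRoundTrip {
        public static void main(String[] args) throws Exception {
            // All four fields are required by the proto definition.
            IncrementTableTimeStampRequest request = IncrementTableTimeStampRequest.newBuilder()
                .setTenantId(ByteString.EMPTY)                 // global tenant
                .setSchemaName(ByteString.copyFromUtf8("S"))   // hypothetical schema
                .setTableName(ByteString.copyFromUtf8("T"))    // hypothetical table
                .setClientTimestamp(System.currentTimeMillis())
                .build();
            // Serialize, then parse back through the generated PARSER path.
            byte[] wire = request.toByteArray();
            IncrementTableTimeStampRequest parsed = IncrementTableTimeStampRequest.parseFrom(wire);
            System.out.println(parsed.equals(request));        // true
        }
    }
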
http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 15c8ebe..e156555 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -103,5 +103,5 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
     public boolean supportsFeature(Feature feature);
     
     public String getUserName();
-    public void clearCacheForTable(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
+    public void incrementTableTimeStamp(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
 }
\ No newline at end of file

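This is the only client-visible rename in the public ConnectionQueryServices API. A hedged usage sketch, assuming a local connection URL and hypothetical schema and table names; unwrapping to PhoenixConnection is the usual way to reach the query services:

    import java.sql.Connection;
    import java.sql.DriverManager;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.jdbc.PhoenixConnection;
    import org.apache.phoenix.query.ConnectionQueryServices;
    import org.apache.phoenix.util.ByteUtil;

    public class BumpTableTimeStamp {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                ConnectionQueryServices services =
                        conn.unwrap(PhoenixConnection.class).getQueryServices();
                // Bump S.T's timestamp so cached metadata is refetched on next use.
                services.incrementTableTimeStamp(
                        ByteUtil.EMPTY_BYTE_ARRAY,        // no tenant
                        Bytes.toBytes("S"),               // hypothetical schema
                        Bytes.toBytes("T"),               // hypothetical table
                        HConstants.LATEST_TIMESTAMP);
            } finally {
                conn.close();
            }
        }
    }
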
http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index fd60eb8..9f30236 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -76,8 +76,6 @@ import org.apache.phoenix.coprocessor.ServerCachingEndpointImpl;
 import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.AddColumnRequest;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableRequest;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheForTableResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest;
@@ -86,6 +84,8 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.DropTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
@@ -1844,7 +1844,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
     
     @Override
-    public void clearCacheForTable(final byte[] tenantId, final byte[] schemaName, final byte[] tableName,
+    public void incrementTableTimeStamp(final byte[] tenantId, final byte[] schemaName, final byte[] tableName,
             final long clientTS) throws SQLException {
         // clear the meta data cache for the table here
         try {
@@ -1852,17 +1852,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             HTableInterface htable = this.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
             try {
                 htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-                        new Batch.Call<MetaDataService, ClearCacheForTableResponse>() {
+                        new Batch.Call<MetaDataService, IncrementTableTimeStampResponse>() {
                             @Override
-                            public ClearCacheForTableResponse call(MetaDataService instance) throws IOException {
+                            public IncrementTableTimeStampResponse call(MetaDataService instance) throws IOException {
                                 ServerRpcController controller = new ServerRpcController();
-                                BlockingRpcCallback<ClearCacheForTableResponse> rpcCallback = new BlockingRpcCallback<ClearCacheForTableResponse>();
-                                ClearCacheForTableRequest.Builder builder = ClearCacheForTableRequest.newBuilder();
+                                BlockingRpcCallback<IncrementTableTimeStampResponse> rpcCallback = new BlockingRpcCallback<IncrementTableTimeStampResponse>();
+                                IncrementTableTimeStampRequest.Builder builder = IncrementTableTimeStampRequest.newBuilder();
                                 builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantId));
                                 builder.setTableName(HBaseZeroCopyByteString.wrap(tableName));
                                 builder.setSchemaName(HBaseZeroCopyByteString.wrap(schemaName));
                                 builder.setClientTimestamp(clientTS);
-                                instance.clearCacheForTable(controller, builder.build(), rpcCallback);
+                                instance.incrementTableTimeStamp(controller, builder.build(), rpcCallback);
                                 if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
                                 return rpcCallback.get();
                             }
@@ -2055,5 +2055,4 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private void throwConnectionClosedException() {
         throw new IllegalStateException("Connection to the cluster is closed");
     }
-    
 }

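The Batch.Call form above broadcasts the endpoint invocation to every SYSTEM.CATALOG region between the empty start and end rows, with ServerRpcController carrying back any server-side failure. Where a single region suffices, the regenerated blocking stub is a more compact alternative; a sketch under that assumption, not the form the commit uses:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;

    public class BlockingIncrement {
        static IncrementTableTimeStampResponse call(HTableInterface htable,
                IncrementTableTimeStampRequest request) throws Exception {
            // Route to the single region hosting the empty start row.
            CoprocessorRpcChannel channel = htable.coprocessorService(HConstants.EMPTY_START_ROW);
            MetaDataService.BlockingInterface stub = MetaDataService.newBlockingStub(channel);
            return stub.incrementTableTimeStamp(null, request);
        }
    }
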
http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 055bc79..37e46b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -190,7 +190,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
     
     @Override
-    public void clearCacheForTable(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
+    public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
             throws SQLException {}
     // TODO: share this with ConnectionQueryServicesImpl
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 8bd2c61..86523fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -228,8 +228,8 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
     
     @Override
-    public void clearCacheForTable(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
+    public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
             throws SQLException {
-        getDelegate().clearCacheForTable(tenantId, schemaName, tableName, clientTS);
+        getDelegate().incrementTableTimeStamp(tenantId, schemaName, tableName, clientTS);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index c4bfebb..18ea16d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -489,20 +489,17 @@ public class MetaDataClient {
         Long scn = connection.getSCN();
         // Always invalidate the cache
         long clientTS = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
-        connection.getQueryServices().clearCacheForTable(tenantIdBytes,
-                Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
-                Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS);
-        String query = "SELECT CURRENT_DATE(),"+ LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
+        String query = "SELECT CURRENT_DATE() - " + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
                 + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
-                + " IS NULL AND " + REGION_NAME + " IS NULL";
+                + " IS NULL AND " + REGION_NAME + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
         ResultSet rs = connection.createStatement().executeQuery(query);
         long msSinceLastUpdate = Long.MAX_VALUE;
-        if (rs.next() && rs.getDate(2) != null) {
-            msSinceLastUpdate = rs.getDate(1).getTime() - rs.getDate(2).getTime();
+        if (rs.next()) {
+            msSinceLastUpdate = rs.getLong(1);
         }
         if (msSinceLastUpdate >= msMinBetweenUpdates) {
             // Here we create the select query.
-            String countQuery = "SELECT /*+ NO_CACHE */ count(*) FROM " + table.getName().getString();
+            String countQuery = "SELECT /*+ NO_CACHE NO_INDEX */ count(*) FROM " + table.getName().getString();
             PhoenixStatement statement = (PhoenixStatement) connection.createStatement();
             QueryPlan plan = statement.compileQuery(countQuery);
             Scan scan = plan.getContext().getScan();
@@ -517,8 +514,9 @@ public class MetaDataClient {
             tempPtr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
             // A single Cell will be returned with the count(*) - we decode that here
             long rowCount = PDataType.LONG.getCodec().decodeLong(tempPtr, SortOrder.getDefault());
-            // We need to update the stats table
-            connection.getQueryServices().clearCacheForTable(tenantIdBytes,
+            // We need to update the table timestamp so that the client will
+            // pull the new table with the updated stats.
+            connection.getQueryServices().incrementTableTimeStamp(tenantIdBytes,
                     Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
                     Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS);
             return new  MutationState(0, connection, rowCount);
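
The hunk above also moves the staleness computation server-side: instead of selecting two dates and subtracting them in Java, the query returns CURRENT_DATE() - LAST_STATS_UPDATE_TIME directly and filters out rows that have never been analyzed. A minimal standalone sketch of the resulting throttle, assuming a plain Phoenix Connection named conn; the literal table and column names stand in for the PhoenixDatabaseMetaData constants used in the diff, and MY_TABLE and the threshold value are placeholders:

    // Sketch only: mirrors the throttle logic in the hunk above.
    long msMinBetweenUpdates = 15 * 60 * 1000; // illustrative threshold
    long msSinceLastUpdate = Long.MAX_VALUE;   // no row => never analyzed => update
    ResultSet rs = conn.createStatement().executeQuery(
        "SELECT CURRENT_DATE() - LAST_STATS_UPDATE_TIME FROM SYSTEM.STATS"
        + " WHERE PHYSICAL_NAME = 'MY_TABLE' AND COLUMN_FAMILY IS NULL"
        + " AND REGION_NAME IS NULL AND LAST_STATS_UPDATE_TIME IS NOT NULL");
    if (rs.next()) {
        msSinceLastUpdate = rs.getLong(1);
    }
    if (msSinceLastUpdate >= msMinBetweenUpdates) {
        // stale enough: run the count(*)/stats collection path
    }

Because the added IS NOT NULL predicate drops never-analyzed rows entirely, an empty result leaves msSinceLastUpdate at Long.MAX_VALUE and the update always proceeds on first use.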

http://git-wip-us.apache.org/repos/asf/phoenix/blob/450400f4/phoenix-protocol/src/main/MetaDataService.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/MetaDataService.proto b/phoenix-protocol/src/main/MetaDataService.proto
index 9174b4d..accd5f2 100644
--- a/phoenix-protocol/src/main/MetaDataService.proto
+++ b/phoenix-protocol/src/main/MetaDataService.proto
@@ -92,14 +92,14 @@ message GetVersionResponse {
   required int64 version = 1;
 }
 
-message ClearCacheForTableRequest {
+message IncrementTableTimeStampRequest {
   required bytes tenantId = 1;
   required bytes schemaName  = 2;
   required bytes tableName = 3;
   required int64 clientTimestamp = 4;
 }
 
-message ClearCacheForTableResponse {
+message IncrementTableTimeStampResponse {
 }
 
 service MetaDataService {
@@ -127,6 +127,6 @@ service MetaDataService {
    rpc getVersion(GetVersionRequest)
     returns (GetVersionResponse);
    
-   rpc clearCacheForTable(ClearCacheForTableRequest)
-    returns (ClearCacheForTableResponse);
+   rpc incrementTableTimeStamp(IncrementTableTimeStampRequest)
+    returns (IncrementTableTimeStampResponse);
 }
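
Once the Java classes are regenerated from this proto, the client-side invocation shown at the top of this mail lines up field-for-field with IncrementTableTimeStampRequest. A hedged sketch of that call site, assuming the regenerated MetaDataProtos nested classes and the usual HBase coprocessor-service plumbing (instance, controller, and rpcCallback are as in the ConnectionQueryServicesImpl hunk; the byte[] arguments are illustrative):

    // Builder setters correspond 1:1 to the request fields declared above.
    IncrementTableTimeStampRequest.Builder builder =
            IncrementTableTimeStampRequest.newBuilder();
    builder.setTenantId(ByteString.copyFrom(tenantIdBytes));
    builder.setSchemaName(ByteString.copyFrom(schemaNameBytes));
    builder.setTableName(ByteString.copyFrom(tableNameBytes));
    builder.setClientTimestamp(clientTS);
    instance.incrementTableTimeStamp(controller, builder.build(), rpcCallback);
    if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }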


[3/3] git commit: Change tests that set server-side config to NeedsOwnMiniClusterTest; destroy driver in MutableIndexReplicationIT to prevent other tests from failing

Posted by ja...@apache.org.
Change tests that set server-side config to NeedsOwnMiniClusterTest
Destroy driver in MutableIndexReplicationIT to prevent other tests from failing

Conflicts:
	phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a60dc35a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a60dc35a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a60dc35a

Branch: refs/heads/4.0
Commit: a60dc35ac5b70f7a944b1918d12806e9d7dc2532
Parents: 450400f
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 7 00:45:31 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 7 02:28:38 2014 -0700

----------------------------------------------------------------------
 .../end2end/BaseClientManagedTimeIT.java        | 29 +---------
 .../phoenix/end2end/BaseHBaseManagedTimeIT.java | 25 +++------
 .../BaseOwnClusterClientManagedTimeIT.java      | 30 ++++++++++
 .../BaseOwnClusterHBaseManagedTimeIT.java       | 30 ++++++++++
 .../phoenix/end2end/BaseOwnClusterIT.java       | 32 +++++++++++
 .../end2end/BaseTenantSpecificTablesIT.java     | 12 +---
 .../apache/phoenix/end2end/CreateTableIT.java   |  2 +-
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |  6 +-
 .../phoenix/end2end/MultiCfQueryExecIT.java     |  8 +--
 .../phoenix/end2end/ParallelIteratorsIT.java    | 25 ++-------
 .../phoenix/end2end/StatsCollectorIT.java       | 12 +---
 .../end2end/TenantSpecificTablesDDLIT.java      |  2 +-
 .../end2end/TenantSpecificTablesDMLIT.java      |  2 +-
 .../index/MutableIndexReplicationIT.java        | 56 +++++++++++++------
 .../phoenix/end2end/index/SaltedIndexIT.java    |  2 -
 .../org/apache/phoenix/util/ReadOnlyProps.java  |  4 ++
 .../java/org/apache/phoenix/query/BaseTest.java | 58 ++++++++++++++++++--
 .../java/org/apache/phoenix/util/TestUtil.java  |  5 ++
 18 files changed, 223 insertions(+), 117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
index 2f60ea1..14dffcb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
@@ -17,10 +17,6 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Date;
-
 import javax.annotation.concurrent.NotThreadSafe;
 
 import org.apache.hadoop.conf.Configuration;
@@ -64,28 +60,7 @@ public abstract class BaseClientManagedTimeIT extends BaseTest {
     }
     
     @AfterClass
-    public static void dropTables() throws Exception {
-        try {
-            disableAndDropNonSystemTables(driver);
-        } finally {
-            try {
-                assertTrue(destroyDriver(driver));
-            } finally {
-                driver = null;
-            }
-        }
-    }
-    
-    protected static void initATableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception {
-        BaseTest.initATableValues(tenantId, splits, date, ts, getUrl());
-    }
-    
-    protected static void initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception {
-        BaseTest.initEntityHistoryTableValues(tenantId, splits, date, ts, getUrl());
-    }
-    
-    protected static void initSaltedEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception {
-        BaseTest.initSaltedEntityHistoryTableValues(tenantId, splits, date, ts, getUrl());
+    public static void doTeardown() throws Exception {
+        dropNonSystemTables();
     }
-        
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseHBaseManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseHBaseManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseHBaseManagedTimeIT.java
index 1d0f004..679f29d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseHBaseManagedTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseHBaseManagedTimeIT.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertTrue;
-
 import javax.annotation.concurrent.NotThreadSafe;
 
 import org.apache.hadoop.conf.Configuration;
@@ -51,27 +49,18 @@ public abstract class BaseHBaseManagedTimeIT extends BaseTest {
         return new Configuration(config);
     }
     
-    @After
-    public void cleanUpAfterTest() throws Exception {
-        deletePriorTables(HConstants.LATEST_TIMESTAMP, getUrl());    
-    }
-    
     @BeforeClass
     public static void doSetup() throws Exception {
         setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
     }
     
     @AfterClass
-    public static void dropTables() throws Exception {
-        try {
-            disableAndDropNonSystemTables(driver);
-        } finally {
-            try {
-                assertTrue(destroyDriver(driver));
-            } finally {
-                driver = null;
-            }
-        }
+    public static void doTeardown() throws Exception {
+        dropNonSystemTables();
+    }
+    
+    @After
+    public void cleanUpAfterTest() throws Exception {
+        deletePriorTables(HConstants.LATEST_TIMESTAMP, getUrl());    
     }
-        
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
new file mode 100644
index 0000000..d986e40
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.junit.After;
+
+public class BaseOwnClusterClientManagedTimeIT extends BaseOwnClusterIT {
+    @After
+    public void cleanUpAfterTest() throws Exception {
+        long ts = nextTimestamp();
+        deletePriorTables(ts - 1, getUrl());    
+    }    
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
new file mode 100644
index 0000000..3472230
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.junit.After;
+
+public class BaseOwnClusterHBaseManagedTimeIT extends BaseOwnClusterIT {
+    @After
+    public void cleanUpAfterTest() throws Exception {
+        deletePriorTables(HConstants.LATEST_TIMESTAMP, getUrl());    
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
new file mode 100644
index 0000000..330022b
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.query.BaseTest;
+import org.junit.AfterClass;
+import org.junit.experimental.categories.Category;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class BaseOwnClusterIT extends BaseTest {
+    @AfterClass
+    public static void doTeardown() throws Exception {
+        tearDownMiniCluster();
+    }
+}
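
Taken together, the three new files form a small hierarchy: BaseOwnClusterIT tears the whole mini cluster down after each test class, while the two subclasses layer on per-test table cleanup for client-managed and HBase-managed timestamps respectively. A hypothetical test that needs its own cluster because it overrides server-side config would opt in as sketched below (the KeyOnlyIT and StatsCollectorIT hunks later in this mail are real migrations of the same shape):

    // Hypothetical example; imports mirror those in the hunks below.
    @Category(NeedsOwnMiniClusterTest.class)
    public class MyServerConfigIT extends BaseOwnClusterHBaseManagedTimeIT {
        @BeforeClass
        public static void doSetup() throws Exception {
            Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
            // Must update config before starting the mini cluster
            props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
            setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
        }
    }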

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
index fe08ac5..362fa08 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
@@ -30,15 +30,8 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-/**
- * Describe your class here.
- *
- * 
- * @since 2.2
- */
-
-@Category(ClientManagedTimeTest.class)
-public abstract class BaseTenantSpecificTablesIT extends BaseClientManagedTimeIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public abstract class BaseTenantSpecificTablesIT extends BaseOwnClusterClientManagedTimeIT {
     protected static final String TENANT_ID = "ZZTop";
     protected static final String TENANT_TYPE_ID = "abc";
     protected static String PHOENIX_JDBC_TENANT_SPECIFIC_URL;
@@ -80,7 +73,6 @@ public abstract class BaseTenantSpecificTablesIT extends BaseClientManagedTimeIT
     }
     
     @BeforeClass
-    @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Must update config before starting server

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 1f9afc4..0b5c105 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -94,8 +94,8 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
                 "                id INTEGER not null primary key desc\n" + 
                 "                ) ";
         conn.createStatement().execute(ddl);
-        conn = DriverManager.getConnection(getUrl(), props);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
+        conn = DriverManager.getConnection(getUrl(), props);
         try {
             conn.createStatement().execute(ddl);
             fail();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
index 9b26c2e..b7e3314 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
@@ -44,17 +44,17 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-@Category(ClientManagedTimeTest.class)
-public class KeyOnlyIT extends BaseClientManagedTimeIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
     
     @BeforeClass
-    @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Must update config before starting server
         props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
+    
     @Test
     public void testKeyOnly() throws Exception {
         long ts = nextTimestamp();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index f4665b2..a28d346 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -44,18 +44,18 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-@Category(ClientManagedTimeTest.class)
-public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
     private static final String MULTI_CF = "MULTI_CF";
     
     @BeforeClass
-    @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20l));
+        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
+    
     protected static void initTableValues(long ts) throws Exception {
         ensureTableCreated(getUrl(),MULTI_CF,null, ts-2);
         

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
index 5a1be0b..dd5b661 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.util.TestUtil.STABLE_NAME;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.analyzeTable;
 import static org.apache.phoenix.util.TestUtil.getAllSplits;
 import static org.apache.phoenix.util.TestUtil.getSplits;
 import static org.junit.Assert.assertEquals;
@@ -29,7 +30,6 @@ import java.sql.PreparedStatement;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PDataType;
@@ -40,8 +40,8 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-@Category(HBaseManagedTimeTest.class)
-public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT {
+@Category(NeedsOwnMiniClusterTest.class)
+public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
 
     protected static final byte[] KMIN  = new byte[] {'!'};
     protected static final byte[] KMIN2  = new byte[] {'.'};
@@ -59,7 +59,6 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT {
     protected static final byte[] KP = new byte[] { 'p' };
     
     @BeforeClass
-    @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Must update config before starting server
@@ -76,12 +75,8 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT {
         PreparedStatement stmt = conn.prepareStatement("ANALYZE STABLE");
         stmt.execute();
         
-        // number of regions > target query concurrency
-        PhoenixPreparedStatement pstmt;
         List<KeyRange> keyRanges;
         
-        pstmt = conn.prepareStatement("SELECT COUNT(*) FROM STABLE").unwrap(PhoenixPreparedStatement.class);
-        pstmt.execute();
         keyRanges = getAllSplits(conn);
         assertEquals("Unexpected number of splits: " + keyRanges, 7, keyRanges.size());
         assertEquals(newKeyRange(KeyRange.UNBOUND, KMIN), keyRanges.get(0));
@@ -114,26 +109,18 @@ public class ParallelIteratorsIT extends BaseHBaseManagedTimeIT {
         byte[][] splits = new byte[][] { K3, K9, KR };
         ensureTableCreated(getUrl(), STABLE_NAME, splits);
 
-        PreparedStatement stmt = conn.prepareStatement("ANALYZE STABLE");
-        stmt.execute();
         List<KeyRange> keyRanges = getAllSplits(conn);
         assertEquals(4, keyRanges.size());
         upsert(conn, new byte[][] { KMIN, K4, K11 });
-        stmt = conn.prepareStatement("ANALYZE STABLE");
-        stmt.execute();
-        conn.prepareStatement("SELECT COUNT(*) FROM STABLE").executeQuery(); 
+        analyzeTable(conn);
         keyRanges = getAllSplits(conn);
         assertEquals(7, keyRanges.size());
         upsert(conn, new byte[][] { KMIN2, K5, K12 });
-        stmt = conn.prepareStatement("ANALYZE STABLE");
-        stmt.execute();
-        conn.prepareStatement("SELECT COUNT(*) FROM STABLE").executeQuery();
+        analyzeTable(conn);
         keyRanges = getAllSplits(conn);
         assertEquals(10, keyRanges.size());
         upsert(conn, new byte[][] { K1, K6, KP });
-        stmt = conn.prepareStatement("ANALYZE STABLE");
-        stmt.execute();
-        conn.prepareStatement("SELECT COUNT(*) FROM STABLE").executeQuery();
+        analyzeTable(conn);
         keyRanges = getAllSplits(conn);
         assertEquals(13, keyRanges.size());
         conn.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 3c0d683..03ff316 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -22,20 +22,14 @@ import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
-@Category(HBaseManagedTimeTest.class)
-public class StatsCollectorIT extends BaseHBaseManagedTimeIT {
-    //private static String url;
-    private static int frequency = 5000;
+@Category(NeedsOwnMiniClusterTest.class)
+public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
     
     @BeforeClass
-    @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20l));
-        props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(frequency));
-        props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(20));
-        props.put(QueryServices.THREAD_POOL_SIZE_ATTRIB, Integer.toString(20));
+        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 0e2b75c..a9fa412 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -56,7 +56,7 @@ import org.apache.phoenix.util.StringUtil;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category(ClientManagedTimeTest.class)
+@Category(NeedsOwnMiniClusterTest.class)
 public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
index 21f80f9..f0ed1d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
@@ -39,7 +39,7 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category(ClientManagedTimeTest.class)
+@Category(NeedsOwnMiniClusterTest.class)
 public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index 9981ed8..ca59df8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -17,12 +17,36 @@
  */
 package org.apache.phoenix.end2end.index;
 
-import com.google.common.collect.Maps;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
@@ -32,22 +56,15 @@ import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.*;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.sql.*;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
+import com.google.common.collect.Maps;
 
 /**
  * Test that we correctly replicate indexes over replication
@@ -94,8 +111,15 @@ public class MutableIndexReplicationIT extends BaseTest {
 
     @AfterClass
     public static void tearDownAfterClass() throws Exception {
-        utility2.shutdownMiniCluster();
-        utility1.shutdownMiniCluster();
+        try {
+            destroyDriver();
+        } finally {
+            try {
+                utility2.shutdownMiniCluster();
+            } finally {
+                utility1.shutdownMiniCluster();
+            }
+        }
     }
 
     private static void setupConfigsAndStartCluster() throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
index f72cc3c..0279283 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
@@ -57,8 +57,6 @@ public class SaltedIndexIT extends BaseIndexIT {
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         // Drop the HBase table metadata for this test
         props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        // Don't put guideposts in
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(10000000));
         // Must update config before starting server
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index fe039d3..68b0879 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -265,4 +265,8 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
     public Iterator<Entry<String, String>> iterator() {
         return props.entrySet().iterator();
     }
+    
+    public boolean isEmpty() {
+        return props.isEmpty();
+    }
 }
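
The new isEmpty() accessor is trivial but worth a note: it lets callers distinguish a default ReadOnlyProps.EMPTY_PROPS setup from one carrying overrides without iterating the entries. A small sketch; the guard at the end is hypothetical, not part of this commit:

    ReadOnlyProps none = ReadOnlyProps.EMPTY_PROPS;
    assert none.isEmpty();
    Map<String, String> m = Maps.newHashMap();
    m.put(QueryServices.DROP_METADATA_ATTRIB, "true");
    ReadOnlyProps some = new ReadOnlyProps(m.entrySet().iterator());
    assert !some.isEmpty(); // overrides present, e.g. don't reuse a shared cluster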

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 653c90d..7bdab7b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -154,7 +154,7 @@ import com.google.common.collect.Sets;
 public abstract class BaseTest {
     private static final Map<String,String> tableDDLMap;
     private static Logger logger = Logger.getLogger("BaseTest.class");
-    private static HBaseTestingUtility utility = null; 
+
     static {
         ImmutableMap.Builder<String,String> builder = ImmutableMap.builder();
         builder.put(ENTITY_HISTORY_TABLE_NAME,"create table " + ENTITY_HISTORY_TABLE_NAME +
@@ -454,6 +454,7 @@ public abstract class BaseTest {
     private static String url;
     protected static PhoenixTestDriver driver;
     private static boolean clusterInitialized = false;
+    private static HBaseTestingUtility utility;
     protected static final Configuration config = HBaseConfiguration.create(); 
     
     protected static String getUrl() {
@@ -485,6 +486,39 @@ public abstract class BaseTest {
         }
     }
     
+    protected static void destroyDriver() throws Exception {
+        if (driver != null) {
+            try {
+                assertTrue(destroyDriver(driver));
+            } finally {
+                driver = null;
+            }
+        }
+    }
+    
+    protected static void dropNonSystemTables() throws Exception {
+        try {
+            disableAndDropNonSystemTables();
+        } finally {
+            destroyDriver();
+        }
+    }
+
+    protected static void tearDownMiniCluster() throws Exception {
+        try {
+            destroyDriver();
+        } finally {
+            try {
+                if (utility != null) {
+                    utility.shutdownMiniCluster();
+                }
+            } finally {
+                utility = null;
+                clusterInitialized = false;
+            }
+        }
+    }
+            
     protected static void setUpTestDriver(ReadOnlyProps props) throws Exception {
         String url = checkClusterInitialized(props);
         if (driver == null) {
@@ -1051,16 +1085,28 @@ public abstract class BaseTest {
             conn.close();
         }
     }
-
+    
+    protected static void initATableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception {
+        initATableValues(tenantId, splits, date, ts, getUrl());
+    }
+    
+    protected static void initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception {
+        initEntityHistoryTableValues(tenantId, splits, date, ts, getUrl());
+    }
+    
+    protected static void initSaltedEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception {
+        initSaltedEntityHistoryTableValues(tenantId, splits, date, ts, getUrl());
+    }
+        
     protected static void initEntityHistoryTableValues(String tenantId, byte[][] splits, String url) throws Exception {
-        initEntityHistoryTableValues(tenantId, splits, null);
+        initEntityHistoryTableValues(tenantId, splits, null, null, url);
     }
     
     protected static void initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, String url) throws Exception {
-        initEntityHistoryTableValues(tenantId, splits, date, null);
+        initEntityHistoryTableValues(tenantId, splits, date, null, url);
     }
     
-    protected static void initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception {
+    private static void initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception {
         if (ts == null) {
             ensureTableCreated(url, ENTITY_HISTORY_TABLE_NAME, splits);
         } else {
@@ -1271,7 +1317,7 @@ public abstract class BaseTest {
     /**
      * Disable and drop all the tables except SYSTEM.CATALOG and SYSTEM.SEQUENCE
      */
-    protected static void disableAndDropNonSystemTables(PhoenixTestDriver driver) throws Exception {
+    private static void disableAndDropNonSystemTables() throws Exception {
         HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin();
         try {
             HTableDescriptor[] tables = admin.listTables();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a60dc35a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 4c33660..65bc7b2 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -497,6 +497,11 @@ public class TestUtil {
         conn.createStatement().execute(query);
     }
     
+    public static void analyzeTable(Connection conn) throws IOException, SQLException {
+        String query = "ANALYZE " + STABLE_NAME;
+        conn.createStatement().execute(query);
+    }
+    
     public static void analyzeTable(String url, long ts, String tableName) throws IOException, SQLException {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));