Posted to commits@phoenix.apache.org by td...@apache.org on 2016/08/26 21:36:59 UTC

[01/16] phoenix git commit: Set version to 4.9.0-HBase-1.0-SNAPSHOT after release

Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.0 [created] be72e538c


Set version to 4.9.0-HBase-1.0-SNAPSHOT after release


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/89014bbc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/89014bbc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/89014bbc

Branch: refs/heads/4.x-HBase-1.0
Commit: 89014bbcfdecd6e5f0659563ba3a8e9d81261c12
Parents: 6fcb083
Author: Ankit Singhal <an...@gmail.com>
Authored: Thu Aug 4 16:56:03 2016 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Thu Aug 4 16:56:03 2016 +0530

----------------------------------------------------------------------
 phoenix-assembly/pom.xml           | 2 +-
 phoenix-client/pom.xml             | 2 +-
 phoenix-core/pom.xml               | 2 +-
 phoenix-flume/pom.xml              | 2 +-
 phoenix-hive/pom.xml               | 2 +-
 phoenix-pherf/pom.xml              | 2 +-
 phoenix-pig/pom.xml                | 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml        | 2 +-
 phoenix-server/pom.xml             | 2 +-
 phoenix-spark/pom.xml              | 2 +-
 phoenix-tracing-webapp/pom.xml     | 2 +-
 pom.xml                            | 2 +-
 13 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 4412ad3..2238511 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index e1f171e..d7e6f05 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index f630169..6fa42bc 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 82d3719..2c1ff96 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-hive/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index c1c8cd0..a8db2d0 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index eda4fc0..cb9406a 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.8.0-HBase-1.0</version>
+		<version>4.9.0-HBase-1.0-SNAPSHOT</version>
 	</parent>
 
 	<artifactId>phoenix-pherf</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index eb4822d..d5a037e 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-queryserver-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index f1e9d2e..7c6ceee 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-queryserver/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 32ae5f3..d4e640f 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 682cbe9..38bd75a 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index d44d6e5..522ca3f 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -28,7 +28,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.8.0-HBase-1.0</version>
+    <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-spark</artifactId>
   <name>Phoenix - Spark</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/phoenix-tracing-webapp/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-tracing-webapp/pom.xml b/phoenix-tracing-webapp/pom.xml
index 182b0b1..4423675 100755
--- a/phoenix-tracing-webapp/pom.xml
+++ b/phoenix-tracing-webapp/pom.xml
@@ -27,7 +27,7 @@
     <parent>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix</artifactId>
-      <version>4.8.0-HBase-1.0</version>
+      <version>4.9.0-HBase-1.0-SNAPSHOT</version>
     </parent>
 
     <artifactId>phoenix-tracing-webapp</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/89014bbc/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 8be8548..327c9bb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.phoenix</groupId>
   <artifactId>phoenix</artifactId>
-  <version>4.8.0-HBase-1.0</version>
+  <version>4.9.0-HBase-1.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Phoenix</name>
   <description>A SQL layer over HBase</description>


[13/16] phoenix git commit: PHOENIX-3195 Addendum. (James Taylor)

Posted by td...@apache.org.
PHOENIX-3195 Addendum. (James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/551072cb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/551072cb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/551072cb

Branch: refs/heads/4.x-HBase-1.0
Commit: 551072cbc1becce61688e1b3fee45f92acd047dd
Parents: 833cf3b
Author: Lars Hofhansl <la...@apache.org>
Authored: Mon Aug 22 20:21:54 2016 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Mon Aug 22 20:25:51 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/compile/ExpressionCompiler.java    | 6 +++++-
 .../main/java/org/apache/phoenix/compile/OrderByCompiler.java  | 3 +++
 2 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/551072cb/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 1278494..0fd1876 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -393,6 +393,10 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
         return ref;
     }
 
+    protected void addColumn(PColumn column) {
+        context.getScan().addColumn(column.getFamilyName().getBytes(), column.getName().getBytes());
+    }
+
     @Override
     public Expression visit(ColumnParseNode node) throws SQLException {
         ColumnRef ref = resolveColumn(node);
@@ -407,7 +411,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
             return LiteralExpression.newConstant(column.getDataType().toObject(ptr), column.getDataType());
         }
         if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(column)) { // project only kv columns
-            context.getScan().addColumn(column.getFamilyName().getBytes(), column.getName().getBytes());
+            addColumn(column);
         }
         Expression expression = ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive());
         Expression wrappedExpression = wrapGroupByExpression(expression);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/551072cb/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
index 6804375..9bc0c31 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
@@ -36,6 +36,7 @@ import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.types.PInteger;
@@ -97,6 +98,8 @@ public class OrderByCompiler {
             compiler = new ExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY) {
                 @Override
                 protected Expression addExpression(Expression expression) {return expression;}
+                @Override
+                protected void addColumn(PColumn column) {}
             };
         } else {
             compiler = new ExpressionCompiler(context, groupBy);
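
The addendum's mechanism, visible in the two hunks above: ExpressionCompiler now funnels key-value column projection through a protected addColumn hook, and OrderByCompiler installs an anonymous subclass whose override is a no-op, so compiling ORDER BY expressions no longer adds columns to the server-side scan. A minimal, self-contained sketch of that hook-override pattern (all names below are illustrative stand-ins, not Phoenix classes):

class ScanColumnHookSketch {

    static class ExpressionCompilerSketch {
        final StringBuilder scan = new StringBuilder(); // stands in for the HBase Scan

        // the overridable projection hook (mirrors the new protected addColumn)
        protected void addColumn(String family, String qualifier) {
            scan.append(family).append('.').append(qualifier).append(' ');
        }

        // every key-value column reference funnels through the hook
        void visitColumn(String family, String qualifier) {
            addColumn(family, qualifier);
        }
    }

    public static void main(String[] args) {
        ExpressionCompilerSketch projecting = new ExpressionCompilerSketch();
        projecting.visitColumn("CF", "COL1");

        // the ORDER BY path: same visitor, no-op hook, nothing added to the scan
        ExpressionCompilerSketch orderBy = new ExpressionCompilerSketch() {
            @Override
            protected void addColumn(String family, String qualifier) {}
        };
        orderBy.visitColumn("CF", "COL1");

        System.out.println("projected: [" + projecting.scan + "]"); // [CF.COL1 ]
        System.out.println("order by:  [" + orderBy.scan + "]");    // []
    }
}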


[02/16] phoenix git commit: PHOENIX-3156 DistinctPrefixFilter optimization produces incorrect results with some non-PK WHERE conditions.

Posted by td...@apache.org.
PHOENIX-3156 DistinctPrefixFilter optimization produces incorrect results with some non-PK WHERE conditions.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/503fba67
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/503fba67
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/503fba67

Branch: refs/heads/4.x-HBase-1.0
Commit: 503fba67d42da9facc9f072e24ced3f033eb6d55
Parents: 89014bb
Author: Lars Hofhansl <la...@apache.org>
Authored: Fri Aug 5 21:39:56 2016 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Fri Aug 5 21:40:57 2016 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DistinctPrefixFilterIT.java    | 7 ++++---
 .../java/org/apache/phoenix/iterate/BaseResultIterators.java  | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/503fba67/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
index 9d31070..1a0e4e1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
@@ -171,8 +171,6 @@ public class DistinctPrefixFilterIT extends BaseHBaseManagedTimeTableReuseIT {
         testPlan("SELECT COUNT(*) FROM (SELECT DISTINCT(prefix1) FROM "+testTable+")", true);
         testPlan("SELECT /*+ RANGE_SCAN */ DISTINCT prefix1 FROM "+testTable, false);
         testPlan("SELECT DISTINCT prefix1, prefix2 FROM "+testTable, true);
-        // use the filter even when the boolean expression filter is used
-        testPlan("SELECT DISTINCT prefix1, prefix2 FROM "+testTable+ " WHERE col1 > 0.5", true);
         // do not use the filter when the distinct is on the entire key
         testPlan("SELECT DISTINCT prefix1, prefix2, prefix3 FROM "+testTable, false);
         testPlan("SELECT DISTINCT (prefix1, prefix2, prefix3) FROM "+testTable, false);
@@ -193,7 +191,6 @@ public class DistinctPrefixFilterIT extends BaseHBaseManagedTimeTableReuseIT {
         testPlan("SELECT (prefix1, prefix2, prefix3) FROM "+testTable+" GROUP BY (prefix1, prefix2, prefix3)", false);
         testPlan("SELECT prefix1, 1, 2 FROM "+testTable+" GROUP BY prefix1", true);
         testPlan("SELECT prefix1 FROM "+testTable+" GROUP BY prefix1, col1", false);
-        testPlan("SELECT DISTINCT prefix1, prefix2 FROM "+testTable+" WHERE col1 > 0.5", true);
 
         testPlan("SELECT COUNT(DISTINCT prefix1) FROM "+testTable+" HAVING COUNT(col1) > 10", false);
         testPlan("SELECT COUNT(DISTINCT prefix1) FROM "+testTable+" ORDER BY COUNT(col1)", true);
@@ -204,6 +201,8 @@ public class DistinctPrefixFilterIT extends BaseHBaseManagedTimeTableReuseIT {
         testPlan("SELECT COUNT(DISTINCT prefix1) FROM "+testTable+" HAVING COUNT(DISTINCT prefix2) > 10", false);
         testPlan("SELECT COUNT(DISTINCT prefix1) FROM "+testTable+" HAVING COUNT(DISTINCT prefix1) > 10", false);
         testPlan("SELECT COUNT(DISTINCT prefix1) / 10 FROM "+testTable, false);
+        // do not use the filter when the boolean expression filter is used
+        testPlan("SELECT DISTINCT prefix1, prefix2 FROM "+testTable+" WHERE col1 > 0.5", false);
     }
 
     private void testPlan(String query, boolean optimizable) throws Exception {
@@ -220,6 +219,7 @@ public class DistinctPrefixFilterIT extends BaseHBaseManagedTimeTableReuseIT {
         testSkipRange("SELECT %s prefix1 FROM "+ testTableF + " GROUP BY prefix1, prefix2 HAVING prefix2 = 2147483647", 2);
         testSkipRange("SELECT %s prefix1 FROM "+ testTableF + " GROUP BY prefix1, prefix2 HAVING prefix1 = 2147483647", 1);
         testSkipRange("SELECT %s prefix1 FROM "+ testTableF + " WHERE col1 > 0.99 GROUP BY prefix1, prefix2 HAVING prefix2 = 2", -1);
+        testSkipRange("SELECT %s prefix1 FROM "+ testTableF + " WHERE col1 >=0 and col2 > 990 GROUP BY prefix1, prefix2 HAVING prefix2 = 2", -1);
 
         testSkipRange("SELECT %s prefix1 FROM "+ testTableV + " GROUP BY prefix1, prefix2 HAVING prefix1 IN ('1','2')", 6);
         testSkipRange("SELECT %s prefix1 FROM "+ testTableV + " GROUP BY prefix1, prefix2 HAVING prefix1 IN ('1','2') AND prefix2 IN ('1','2')", 4);
@@ -228,6 +228,7 @@ public class DistinctPrefixFilterIT extends BaseHBaseManagedTimeTableReuseIT {
         testSkipRange("SELECT %s prefix1 FROM "+ testTableV + " GROUP BY prefix1, prefix2 HAVING prefix2 = '22'", 1);
         testSkipRange("SELECT %s prefix1 FROM "+ testTableV + " GROUP BY prefix1, prefix2 HAVING prefix1 = '22'", 1);
         testSkipRange("SELECT %s prefix1 FROM "+ testTableV + " WHERE col1 > 0.99 GROUP BY prefix1, prefix2 HAVING prefix2 = '2'", -1);
+        testSkipRange("SELECT %s prefix1 FROM "+ testTableV + " WHERE col1 >= 0 and col2 > 990 GROUP BY prefix1, prefix2 HAVING prefix2 = '2'", -1);
 
         testCommonGroupBy(testTableF);
         testCommonGroupBy(testTableV);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/503fba67/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index d0ade72..ceba000 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -227,7 +227,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
             }
 
             int cols = plan.getGroupBy().getOrderPreservingColumnCount();
-            if (cols > 0 &&
+            if (cols > 0 && context.getWhereConditionColumns().size() == 0 &&
                 !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) &&
                 cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() &&
                 plan.getGroupBy().isOrderPreserving() &&
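
The reasoning behind the new guard: the DistinctPrefixFilter seeks ahead to the next distinct row-key prefix, so when a WHERE condition on a non-PK column is also being evaluated, the seek can jump over the only rows of a prefix that would have passed that filter. The fix therefore disables the optimization whenever getWhereConditionColumns() is non-empty. A small stand-alone illustration of the failure mode (plain Java, not Phoenix code; the row data is hypothetical):

import java.util.LinkedHashSet;
import java.util.Set;

class DistinctPrefixSketch {
    public static void main(String[] args) {
        // (prefix, col1) pairs in row-key order
        int[][] rows = { {1, 0}, {1, 9}, {2, 9}, {3, 0} };

        // unsafe: seek to the next prefix after its first row, then filter col1 > 5
        Set<Integer> premature = new LinkedHashSet<>();
        int last = Integer.MIN_VALUE;
        for (int[] r : rows) {
            if (r[0] != last) {          // distinct-prefix seek
                last = r[0];
                if (r[1] > 5) premature.add(r[0]);
            }
        }

        // safe: filter every row on col1 > 5, then take distinct prefixes
        Set<Integer> correct = new LinkedHashSet<>();
        for (int[] r : rows) {
            if (r[1] > 5) correct.add(r[0]);
        }

        System.out.println("premature seek: " + premature); // [2]
        System.out.println("correct:        " + correct);   // [1, 2]
    }
}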


[05/16] phoenix git commit: PHOENIX-3149 Local index gets corrupted if an intermediate compaction happens during the split. (Sergey Soldatov)

Posted by td...@apache.org.
PHOENIX-3149 Local index gets corrupted if an intermediate compaction happens during the split. (Sergey Soldatov)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b64b08f3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b64b08f3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b64b08f3

Branch: refs/heads/4.x-HBase-1.0
Commit: b64b08f313490fabd6675066007897f5b472e92c
Parents: 62d4566
Author: Ankit Singhal <an...@gmail.com>
Authored: Sun Aug 7 13:55:00 2016 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Sun Aug 7 13:55:38 2016 +0530

----------------------------------------------------------------------
 .../hbase/regionserver/IndexHalfStoreFileReaderGenerator.java       | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b64b08f3/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 1bd6222..45b5aef 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -185,7 +185,6 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
             long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
         if (!store.getFamily().getNameAsString()
                 .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)
-                || !scanType.equals(ScanType.COMPACT_DROP_DELETES)
                 || s != null
                 || !store.hasReferences()) {
             return s;


[08/16] phoenix git commit: PHOENIX-2995 Write performance severely degrades with a large number of views

Posted by td...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 2425033..6241c3a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -65,9 +65,9 @@ public class TransactionUtil {
             .build().buildException();
     }
     
-    public static TransactionAwareHTable getTransactionAwareHTable(HTableInterface htable, PTable table) {
+    public static TransactionAwareHTable getTransactionAwareHTable(HTableInterface htable, boolean isImmutableRows) {
     	// Conflict detection is not needed for tables with write-once/append-only data
-    	return new TransactionAwareHTable(htable, table.isImmutableRows() ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
+    	return new TransactionAwareHTable(htable, isImmutableRows ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
     }
     
     // we resolve transactional tables at the txn read pointer

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
index 9c92679..ef88c8c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
@@ -31,17 +31,15 @@ import com.google.common.collect.Sets;
 
 public class PMetaDataImplTest {
     
-    private static PMetaData addToTable(PMetaData metaData, String name, int size, TestTimeKeeper timeKeeper) throws SQLException {
+    private static void addToTable(PMetaData metaData, String name, int size, TestTimeKeeper timeKeeper) throws SQLException {
         PTable table = new PSizedTable(new PTableKey(null,name), size);
-        PMetaData newMetaData = metaData.addTable(table, System.currentTimeMillis());
+        metaData.addTable(table, System.currentTimeMillis());
         timeKeeper.incrementTime();
-        return newMetaData;
     }
     
-    private static PMetaData removeFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) throws SQLException {
-        PMetaData newMetaData =  metaData.removeTable(null, name, null, HConstants.LATEST_TIMESTAMP);
+    private static void removeFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) throws SQLException {
+        metaData.removeTable(null, name, null, HConstants.LATEST_TIMESTAMP);
         timeKeeper.incrementTime();
-        return newMetaData;
     }
     
     private static PTable getFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) throws TableNotFoundException {
@@ -77,41 +75,41 @@ public class PMetaDataImplTest {
         long maxSize = 10;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
         PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
-        metaData = addToTable(metaData, "a", 5, timeKeeper);
+        addToTable(metaData, "a", 5, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "b", 4, timeKeeper);
+        addToTable(metaData, "b", 4, timeKeeper);
         assertEquals(2, metaData.size());
-        metaData = addToTable(metaData, "c", 3, timeKeeper);
+        addToTable(metaData, "c", 3, timeKeeper);
         assertEquals(2, metaData.size());
         assertNames(metaData, "b","c");
 
-        metaData = addToTable(metaData, "b", 8, timeKeeper);
+        addToTable(metaData, "b", 8, timeKeeper);
         assertEquals(1, metaData.size());
         assertNames(metaData, "b");
 
-        metaData = addToTable(metaData, "d", 11, timeKeeper);
+        addToTable(metaData, "d", 11, timeKeeper);
         assertEquals(1, metaData.size());
         assertNames(metaData, "d");
         
-        metaData = removeFromTable(metaData, "d", timeKeeper);
+        removeFromTable(metaData, "d", timeKeeper);
         assertNames(metaData);
         
-        metaData = addToTable(metaData, "a", 4, timeKeeper);
+        addToTable(metaData, "a", 4, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "b", 3, timeKeeper);
+        addToTable(metaData, "b", 3, timeKeeper);
         assertEquals(2, metaData.size());
-        metaData = addToTable(metaData, "c", 2, timeKeeper);
+        addToTable(metaData, "c", 2, timeKeeper);
         assertEquals(3, metaData.size());
         assertNames(metaData, "a", "b","c");
         
         getFromTable(metaData, "a", timeKeeper);
-        metaData = addToTable(metaData, "d", 3, timeKeeper);
+        addToTable(metaData, "d", 3, timeKeeper);
         assertEquals(3, metaData.size());
         assertNames(metaData, "c", "a","d");
         
         // Clone maintains insert order
         metaData = metaData.clone();
-        metaData = addToTable(metaData, "e", 6, timeKeeper);
+        addToTable(metaData, "e", 6, timeKeeper);
         assertEquals(2, metaData.size());
         assertNames(metaData, "d","e");
     }
@@ -121,17 +119,17 @@ public class PMetaDataImplTest {
         long maxSize = 5;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
         PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
-        metaData = addToTable(metaData, "a", 1, timeKeeper);
+        addToTable(metaData, "a", 1, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "b", 1, timeKeeper);
+        addToTable(metaData, "b", 1, timeKeeper);
         assertEquals(2, metaData.size());
         assertNames(metaData, "a", "b");
-        metaData = addToTable(metaData, "c", 3, timeKeeper);
+        addToTable(metaData, "c", 3, timeKeeper);
         assertEquals(3, metaData.size());
         assertNames(metaData, "a", "b", "c");
         getFromTable(metaData, "a", timeKeeper);
         getFromTable(metaData, "b", timeKeeper);
-        metaData = addToTable(metaData, "d", 3, timeKeeper);
+        addToTable(metaData, "d", 3, timeKeeper);
         assertEquals(3, metaData.size());
         assertNames(metaData, "a", "b", "d");
     }
@@ -141,18 +139,18 @@ public class PMetaDataImplTest {
         long maxSize = 5;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
         PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
-        metaData = addToTable(metaData, "a", 1, timeKeeper);
+        addToTable(metaData, "a", 1, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "b", 1, timeKeeper);
+        addToTable(metaData, "b", 1, timeKeeper);
         assertEquals(2, metaData.size());
-        metaData = addToTable(metaData, "c", 5, timeKeeper);
+        addToTable(metaData, "c", 5, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "d", 20, timeKeeper);
+        addToTable(metaData, "d", 20, timeKeeper);
         assertEquals(1, metaData.size());
         assertNames(metaData, "d");
-        metaData = addToTable(metaData, "e", 1, timeKeeper);
+        addToTable(metaData, "e", 1, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "f", 2, timeKeeper);
+        addToTable(metaData, "f", 2, timeKeeper);
         assertEquals(2, metaData.size());
         assertNames(metaData, "e", "f");
     }
@@ -162,18 +160,18 @@ public class PMetaDataImplTest {
         long maxSize = 0;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
         PMetaData metaData = new PMetaDataImpl(0, maxSize, timeKeeper);
-        metaData = addToTable(metaData, "a", 1, timeKeeper);
+        addToTable(metaData, "a", 1, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "b", 1, timeKeeper);
+        addToTable(metaData, "b", 1, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "c", 5, timeKeeper);
+        addToTable(metaData, "c", 5, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "d", 20, timeKeeper);
+        addToTable(metaData, "d", 20, timeKeeper);
         assertEquals(1, metaData.size());
         assertNames(metaData, "d");
-        metaData = addToTable(metaData, "e", 1, timeKeeper);
+        addToTable(metaData, "e", 1, timeKeeper);
         assertEquals(1, metaData.size());
-        metaData = addToTable(metaData, "f", 2, timeKeeper);
+        addToTable(metaData, "f", 2, timeKeeper);
         assertEquals(1, metaData.size());
         assertNames(metaData, "f");
     }
@@ -184,12 +182,12 @@ public class PMetaDataImplTest {
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
         PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
         String tableName = "a";
-        metaData = addToTable(metaData, tableName, 1, timeKeeper);
+        addToTable(metaData, tableName, 1, timeKeeper);
         PTableRef aTableRef = metaData.getTableRef(new PTableKey(null,tableName));
         assertNotNull(aTableRef);
         assertEquals(1, metaData.getAge(aTableRef));
         tableName = "b";
-        metaData = addToTable(metaData, tableName, 1, timeKeeper);
+        addToTable(metaData, tableName, 1, timeKeeper);
         PTableRef bTableRef = metaData.getTableRef(new PTableKey(null,tableName));
         assertNotNull(bTableRef);
         assertEquals(1, metaData.getAge(bTableRef));
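
The test refactor above mirrors the core change for PHOENIX-2995: PMetaData is now mutated in place instead of returning a fresh instance on every addTable/removeTable, eliminating the per-write copy that became expensive with a large number of views. A sketch of the two calling conventions (an illustrative map-based stand-in, not Phoenix's API):

import java.util.HashMap;
import java.util.Map;

class MetaDataCacheSketch {
    // old convention: copy-on-write -- every add duplicates the whole cache
    static Map<String, Integer> addCopyOnWrite(Map<String, Integer> cache,
                                               String table, int size) {
        Map<String, Integer> copy = new HashMap<>(cache); // O(n) per write
        copy.put(table, size);
        return copy; // caller must rebind: cache = addCopyOnWrite(cache, ...)
    }

    // new convention: mutate in place -- O(1) per write, no rebinding
    static void addInPlace(Map<String, Integer> cache, String table, int size) {
        cache.put(table, size);
    }

    public static void main(String[] args) {
        Map<String, Integer> cache = new HashMap<>();
        cache = addCopyOnWrite(cache, "a", 5); // old style, as the test used to do
        addInPlace(cache, "b", 4);             // new style, as the test does now
        System.out.println(cache);             // {a=5, b=4}
    }
}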


[14/16] phoenix git commit: PHOENIX-930 Duplicate columns cause a query exception and a DROP TABLE exception (Junegunn Choi, Kalyan Hadoop)

Posted by td...@apache.org.
PHOENIX-930 Duplicate columns cause a query exception and a DROP TABLE exception (Junegunn Choi, Kalyan Hadoop)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f2e0ab23
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f2e0ab23
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f2e0ab23

Branch: refs/heads/4.x-HBase-1.0
Commit: f2e0ab23ff740656f106a986f800e5754f3be879
Parents: 551072c
Author: James Taylor <ja...@apache.org>
Authored: Fri Aug 19 16:34:28 2016 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Tue Aug 23 15:18:49 2016 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/index/IndexMetadataIT.java  | 40 ++++++++++
 .../apache/phoenix/schema/MetaDataClient.java   | 78 +++++---------------
 .../org/apache/phoenix/schema/PTableImpl.java   | 12 +--
 .../compile/CreateTableCompilerTest.java        | 47 ++++++++++++
 4 files changed, 113 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2e0ab23/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
index 1af15a1..a48cc4b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMetadataIT.java
@@ -43,6 +43,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.ColumnAlreadyExistsException;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
@@ -401,6 +402,45 @@ public class IndexMetadataIT extends BaseHBaseManagedTimeIT {
             conn.close();
         }
     }
+
+    @Test
+    public void testTableWithSameColumnNames() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+        try {
+            String ddl = "create table test_table (char_pk varchar not null,"
+        		+ " int_col integer, long_col integer, int_col integer"
+        		+ " constraint pk primary key (char_pk))";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            fail("Should have caught exception");
+        } catch (ColumnAlreadyExistsException e) {
+            assertEquals(SQLExceptionCode.COLUMN_EXIST_IN_DEF.getErrorCode(), e.getErrorCode());
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testTableWithSameColumnNamesWithFamily() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+        try {
+            String ddl = "create table test_table (char_pk varchar not null,"
+        		+ " a.int_col integer, a.long_col integer,"
+        		+ " a.int_col integer, b.long_col integer"
+        		+ " constraint pk primary key (char_pk))";
+            PreparedStatement stmt = conn.prepareStatement(ddl);
+            stmt.execute();
+            fail("Should have caught exception");
+        } catch (ColumnAlreadyExistsException e) {
+            assertEquals(SQLExceptionCode.COLUMN_EXIST_IN_DEF.getErrorCode(), e.getErrorCode());
+        } finally {
+            conn.close();
+        }
+    }
     
     @Test
     public void testIndexDefinitionWithSameColumnNamesInTwoFamily() throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2e0ab23/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 1264e32..7f97f4a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.schema;
 
-import static com.google.common.collect.Lists.newArrayListWithExpectedSize;
 import static com.google.common.collect.Sets.newLinkedHashSet;
 import static com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize;
 import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
@@ -111,6 +110,7 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
@@ -120,7 +120,6 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -216,7 +215,6 @@ import org.apache.tephra.TxConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Objects;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
@@ -1886,7 +1884,7 @@ public class MetaDataClient {
             }
 
             List<ColumnDef> colDefs = statement.getColumnDefs();
-            List<PColumn> columns;
+            LinkedHashMap<PColumn,PColumn> columns;
             LinkedHashSet<PColumn> pkColumns;
 
             if (tenantId != null && !sharedTable) {
@@ -1905,7 +1903,7 @@ public class MetaDataClient {
             if (tableType == PTableType.VIEW) {
                 physicalNames = Collections.singletonList(PNameFactory.newName(parent.getPhysicalName().getString()));
                 if (viewType == ViewType.MAPPED) {
-                    columns = newArrayListWithExpectedSize(colDefs.size());
+                    columns = Maps.newLinkedHashMap();
                     pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size());
                 } else {
                     // Propagate property values to VIEW.
@@ -1926,8 +1924,10 @@ public class MetaDataClient {
                     if (saltBucketNum != null) { // Don't include salt column in columns, as it should not have it when created
                         allColumns = allColumns.subList(1, allColumns.size());
                     }
-                    columns = newArrayListWithExpectedSize(allColumns.size() + colDefs.size());
-                    columns.addAll(allColumns);
+                    columns = new LinkedHashMap<PColumn,PColumn>(allColumns.size() + colDefs.size());
+                    for (PColumn column : allColumns) {
+                        columns.put(column, column);
+                    }
                     pkColumns = newLinkedHashSet(parent.getPKColumns());
 
                     // Add row linking from view to its parent table
@@ -1943,7 +1943,7 @@ public class MetaDataClient {
                     linkStatement.execute();
                 }
             } else {
-                columns = newArrayListWithExpectedSize(colDefs.size());
+                columns = new LinkedHashMap<PColumn,PColumn>(colDefs.size());
                 pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1); // in case salted
             }
 
@@ -2026,11 +2026,9 @@ public class MetaDataClient {
                         throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
                     }
                 }
-                if (tableType == PTableType.VIEW && hasColumnWithSameNameAndFamily(columns, column)) {
-                    // we only need to check for dup columns for views because they inherit columns from parent
+                if (columns.put(column, column) != null) {
                     throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
                 }
-                columns.add(column);
                 if ((colDef.getDataType() == PVarbinary.INSTANCE || colDef.getDataType().isArrayType())
                         && SchemaUtil.isPKColumn(column)
                         && pkColumnsIterator.hasNext()) {
@@ -2129,7 +2127,7 @@ public class MetaDataClient {
                 PName newSchemaName = PNameFactory.newName(schemaName);
                 PTable table = PTableImpl.makePTable(tenantId,newSchemaName, PNameFactory.newName(tableName), tableType,
                         null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM,
-                        PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME), null, columns, null, null,
+                        PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME), null, columns.values(), null, null,
                         Collections.<PTable>emptyList(), isImmutableRows,
                         Collections.<PName>emptyList(), defaultFamilyName == null ? null :
                                 PNameFactory.newName(defaultFamilyName), null,
@@ -2163,14 +2161,14 @@ public class MetaDataClient {
             
             List<Mutation> columnMetadata = Lists.newArrayListWithExpectedSize(columns.size());
             try (PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN_CREATE_TABLE)) {
-                for (int i = 0; i < columns.size(); i++) {
-                    PColumn column = columns.get(i);
+                for (Map.Entry<PColumn, PColumn> entry : columns.entrySet()) {
+                    PColumn column = entry.getValue();
                     final int columnPosition = column.getPosition();
                     // For client-side cache, we need to update the column
                     // set the autoPartition column attributes   
                     if (parent != null && parent.getAutoPartitionSeqName() != null
                             && parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent)).equals(column)) {
-                        columns.set(i, column = new DelegateColumn(column) {
+                        entry.setValue(column = new DelegateColumn(column) {
                             @Override
                             public byte[] getViewConstant() {
                                 // set to non-null value so that we will generate a Put that 
@@ -2186,7 +2184,7 @@ public class MetaDataClient {
                     }
                     else if (isViewColumnReferenced != null) {
                         if (viewColumnConstants != null && columnPosition < viewColumnConstants.length) {
-                            columns.set(i, column = new DelegateColumn(column) {
+                            entry.setValue(column = new DelegateColumn(column) {
                                 @Override
                                 public byte[] getViewConstant() {
                                     return viewColumnConstants[columnPosition];
@@ -2197,7 +2195,7 @@ public class MetaDataClient {
                                 }
                             });
                         } else {
-                            columns.set(i, column = new DelegateColumn(column) {
+                            entry.setValue(column = new DelegateColumn(column) {
                                 @Override
                                 public boolean isViewReferenced() {
                                     return isViewColumnReferenced.get(columnPosition);
@@ -2337,19 +2335,12 @@ public class MetaDataClient {
                 // If the parent table of the view has the auto partition sequence name attribute,
                 // set the view statement and relevant partition column attributes correctly
                 if (parent!=null && parent.getAutoPartitionSeqName()!=null) {
-                    int autoPartitionColIndex = -1;
-                    PColumn autoPartitionCol = parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent));
-                    for (int i=0; i<columns.size(); ++i) {
-                        if (autoPartitionCol.getName().equals(columns.get(i).getName())) {
-                            autoPartitionColIndex = i;
-                        }
-                    }
+                    final PColumn autoPartitionCol = parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent));
                     final Long autoPartitionNum = Long.valueOf(result.getAutoPartitionNum());
-                    final PColumn column = columns.get(autoPartitionColIndex);
-                    columns.set(autoPartitionColIndex, new DelegateColumn(column) {
+                    columns.put(autoPartitionCol, new DelegateColumn(autoPartitionCol) {
                         @Override
                         public byte[] getViewConstant() {
-                            PDataType dataType = column.getDataType();
+                            PDataType dataType = autoPartitionCol.getDataType();
                             Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                             byte[] bytes = new byte [dataType.getByteSize() + 1];
                             dataType.toBytes(val, bytes, 0);
@@ -2371,7 +2362,7 @@ public class MetaDataClient {
                 PName newSchemaName = PNameFactory.newName(schemaName);
                 PTable table =  PTableImpl.makePTable(
                         tenantId, newSchemaName, PNameFactory.newName(tableName), tableType, indexState, timestamp!=null ? timestamp : result.getMutationTime(),
-                        PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns,
+                        PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns.values(),
                         dataTableName == null ? null : newSchemaName, dataTableName == null ? null : PNameFactory.newName(dataTableName), Collections.<PTable>emptyList(), isImmutableRows,
                         physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType,
                         indexId, indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema);
@@ -2384,29 +2375,6 @@ public class MetaDataClient {
         }
     }
 
-    private byte[][] getSplitKeys(List<HRegionLocation> allTableRegions) {
-        if(allTableRegions.size() == 1) return null;
-        byte[][] splitKeys = new byte[allTableRegions.size()-1][];
-        int i = 0;
-        for (HRegionLocation region : allTableRegions) {
-            if (region.getRegionInfo().getStartKey().length != 0) {
-                splitKeys[i] = region.getRegionInfo().getStartKey();
-                i++;
-            }
-        }
-        return splitKeys;
-    }
-
-    private static boolean hasColumnWithSameNameAndFamily(Collection<PColumn> columns, PColumn column) {
-        for (PColumn currColumn : columns) {
-           if (Objects.equal(currColumn.getFamilyName(), column.getFamilyName()) &&
-               Objects.equal(currColumn.getName(), column.getName())) {
-               return true;
-           }
-        }
-        return false;
-    }
-
     /**
      * A table can be a parent table to tenant-specific tables if all of the following conditions are true:
      * <p>
@@ -2517,7 +2485,6 @@ public class MetaDataClient {
             Delete tableDelete = new Delete(key, clientTimeStamp);
             tableMetaData.add(tableDelete);
             boolean hasViewIndexTable = false;
-            boolean hasLocalIndexTable = false;
             if (parentTableName != null) {
                 byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
                 Delete linkDelete = new Delete(linkKey, clientTimeStamp);
@@ -2558,11 +2525,6 @@ public class MetaDataClient {
                         // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                         // All multi-tenant tables have a view index table, so no need to check in that case
                         if (parentTableName == null) {
-                            for (PTable index : table.getIndexes()) {
-                                if (index.getIndexType() == IndexType.LOCAL) {
-                                    hasLocalIndexTable = true;
-                                } 
-                            }
                             hasViewIndexTable = true;// keeping always true for deletion of stats if view index present
                                                      // or not
                             MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(),
@@ -3716,7 +3678,7 @@ public class MetaDataClient {
                 || useSchemaStatement.getSchemaName().toUpperCase().equals(SchemaUtil.SCHEMA_FOR_DEFAULT_NAMESPACE)) {
             connection.setSchema(null);
         } else {
-            PSchema schema = FromCompiler.getResolverForSchema(useSchemaStatement, connection)
+            FromCompiler.getResolverForSchema(useSchemaStatement, connection)
                     .resolveSchema(useSchemaStatement.getSchemaName());
             connection.setSchema(useSchemaStatement.getSchemaName());
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2e0ab23/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 847979a..92c49f9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -24,6 +24,7 @@ import static org.apache.phoenix.schema.SaltingUtil.SALTING_COLUMN;
 import java.io.IOException;
 import java.sql.SQLException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -299,7 +300,7 @@ public class PTableImpl implements PTable {
 
     public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
             PIndexState state, long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum,
-            List<PColumn> columns, PName dataSchemaName, PName dataTableName, List<PTable> indexes,
+            Collection<PColumn> columns, PName dataSchemaName, PName dataTableName, List<PTable> indexes,
             boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
             boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
             IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
@@ -313,7 +314,7 @@ public class PTableImpl implements PTable {
 
     public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
             PIndexState state, long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum,
-            List<PColumn> columns, PName dataSchemaName, PName dataTableName, List<PTable> indexes,
+            Collection<PColumn> columns, PName dataSchemaName, PName dataTableName, List<PTable> indexes,
             boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
             boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
             IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
@@ -328,7 +329,7 @@ public class PTableImpl implements PTable {
     }
 
     private PTableImpl(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state,
-            long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum, List<PColumn> columns,
+            long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum, Collection<PColumn> columns,
             PName parentSchemaName, PName parentTableName, List<PTable> indexes, boolean isImmutableRows,
             List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,
             boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType,
@@ -367,7 +368,7 @@ public class PTableImpl implements PTable {
     }
 
     private void init(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state, long timeStamp, long sequenceNumber,
-            PName pkName, Integer bucketNum, List<PColumn> columns, PName parentSchemaName, PName parentTableName,
+            PName pkName, Integer bucketNum, Collection<PColumn> columns, PName parentSchemaName, PName parentTableName,
             List<PTable> indexes, boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL,
             boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
             IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, 
@@ -423,8 +424,7 @@ public class PTableImpl implements PTable {
             allColumns = new PColumn[columns.size()];
             pkColumns = Lists.newArrayListWithExpectedSize(columns.size());
         }
-        for (int i = 0; i < columns.size(); i++) {
-            PColumn column = columns.get(i);
+        for (PColumn column : columns) {
             allColumns[column.getPosition()] = column;
             PName familyName = column.getFamilyName();
             if (familyName == null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2e0ab23/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java
new file mode 100644
index 0000000..ed907c3
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.BaseConnectionlessQueryTest;
+import org.apache.phoenix.schema.ColumnAlreadyExistsException;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.Test;
+
+public class CreateTableCompilerTest extends BaseConnectionlessQueryTest {
+    @Test
+    public void testCreateTableWithDuplicateColumns() throws SQLException {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
+        String ddl = "CREATE TABLE T (ID INTEGER PRIMARY KEY, DUPE INTEGER, DUPE INTEGER)";
+        try {
+            conn.createStatement().execute(ddl);
+            fail();
+        } catch (ColumnAlreadyExistsException e) {
+            assertEquals("DUPE", e.getColumnName());
+        }
+    }
+}

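For reference, a minimal sketch of how the new check surfaces to a plain JDBC client outside the test harness; the connection URL and printed message are illustrative assumptions, not part of the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class DuplicateColumnExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                    "CREATE TABLE T (ID INTEGER PRIMARY KEY, DUPE INTEGER, DUPE INTEGER)");
            } catch (SQLException e) {
                // ColumnAlreadyExistsException extends SQLException, so plain JDBC
                // callers see the duplicate column rejected at statement compile time.
                System.out.println("CREATE TABLE rejected: " + e.getMessage());
            }
        }
    }
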

[03/16] phoenix git commit: PHOENIX-3158 COUNT(DISTINCT) may return null instead of 0 after PHOENIX-258.

Posted by td...@apache.org.
PHOENIX-3158 COUNT(DISTINCT) may return null instead of 0 after PHOENIX-258.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6198bf7b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6198bf7b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6198bf7b

Branch: refs/heads/4.x-HBase-1.0
Commit: 6198bf7b43fb522319cffb44d4237756cc4834f3
Parents: 503fba6
Author: Lars Hofhansl <la...@apache.org>
Authored: Sat Aug 6 16:27:32 2016 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Sat Aug 6 16:28:05 2016 -0700

----------------------------------------------------------------------
 .../it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java | 2 ++
 .../src/main/java/org/apache/phoenix/execute/AggregatePlan.java    | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6198bf7b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
index 1a0e4e1..e8e9e07 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctPrefixFilterIT.java
@@ -290,6 +290,8 @@ public class DistinctPrefixFilterIT extends BaseHBaseManagedTimeTableReuseIT {
 
         testCount("SELECT %s COUNT(DISTINCT col1) FROM " + testTable, -1);
         testCount("SELECT %s COUNT(DISTINCT col2) FROM " + testTable, -1);
+
+        testCount("SELECT %s COUNT(DISTINCT prefix1) FROM " + testTable + " WHERE col1 < 0", -1);
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6198bf7b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 136379d..00d478a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -223,7 +223,7 @@ public class AggregatePlan extends BaseQueryPlan {
 
         AggregatingResultIterator aggResultIterator;
         // No need to merge sort for ungrouped aggregation
-        if (groupBy.isEmpty()) {
+        if (groupBy.isEmpty() || groupBy.isUngroupedAggregate()) {
             aggResultIterator = new UngroupedAggregatingResultIterator(new ConcatResultIterator(iterators), aggregators);
         // If salted or local index we still need a merge sort as we'll potentially have multiple group by keys that aren't contiguous.
         } else if (groupBy.isOrderPreserving() && !(this.getTableRef().getTable().getBucketNum() != null || this.getTableRef().getTable().getIndexType() == IndexType.LOCAL)) {

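The test line added above pins down the regression; a sketch of the client-visible behavior, assuming a table shaped like the one in DistinctPrefixFilterIT (the URL and table name are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class CountDistinctEmptyScan {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT COUNT(DISTINCT prefix1) FROM testTable WHERE col1 < 0");
                rs.next();
                // An ungrouped COUNT(DISTINCT) over zero matching rows must return 0,
                // not NULL; routing groupBy.isUngroupedAggregate() through
                // UngroupedAggregatingResultIterator restores that guarantee.
                System.out.println(rs.getLong(1)); // expected: 0
            }
        }
    }
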

[16/16] phoenix git commit: PHOENIX-3203 Tenant cache lookup in Global Cache fails in certain conditions

Posted by td...@apache.org.
PHOENIX-3203 Tenant cache lookup in Global Cache fails in certain conditions


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/be72e538
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/be72e538
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/be72e538

Branch: refs/heads/4.x-HBase-1.0
Commit: be72e538cec7d11256edefae05c1020e82e1882e
Parents: b6c31ef
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Thu Aug 25 15:25:18 2016 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Fri Aug 26 11:44:52 2016 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/index/MutableIndexIT.java   | 25 +++++++++++++-------
 .../org/apache/phoenix/cache/GlobalCache.java   |  4 ++--
 .../cache/aggcache/SpillableGroupByCache.java   |  5 ++--
 .../phoenix/coprocessor/GroupByCache.java       |  5 ++--
 .../GroupedAggregateRegionObserver.java         | 15 ++++++------
 .../coprocessor/HashJoinRegionScanner.java      |  5 ++--
 .../phoenix/coprocessor/ScanRegionObserver.java |  9 ++++---
 .../phoenix/index/PhoenixIndexMetaData.java     |  3 +--
 .../java/org/apache/phoenix/util/ScanUtil.java  |  5 ++--
 9 files changed, 40 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index fada2da..d4ff174 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -818,9 +818,9 @@ public class MutableIndexIT extends BaseHBaseManagedTimeIT {
           conn.createStatement().execute(
               "CREATE TABLE IF NOT EXISTS " + fullTableName + 
               "(TENANT_ID CHAR(15) NOT NULL,"+
-              "TYPE VARCHAR(25) NOT NULL,"+
+              "TYPE VARCHAR(25),"+
               "ENTITY_ID CHAR(15) NOT NULL,"+
-              "CONSTRAINT PK_CONSTRAINT PRIMARY KEY (TENANT_ID, TYPE, ENTITY_ID)) MULTI_TENANT=TRUE "
+              "CONSTRAINT PK_CONSTRAINT PRIMARY KEY (TENANT_ID, ENTITY_ID)) MULTI_TENANT=TRUE "
               + (!tableDDLOptions.isEmpty() ? "," + tableDDLOptions : "") );
           // create index
           conn.createStatement().execute("CREATE INDEX IF NOT EXISTS " + indexName + " ON " + fullTableName + " (ENTITY_ID, TYPE)");
@@ -830,14 +830,23 @@ public class MutableIndexIT extends BaseHBaseManagedTimeIT {
           props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
           // connection is tenant-specific
           try (Connection tenantConn = DriverManager.getConnection(getUrl(), props)) {
-              for (int i=0; i<2; ++i) {
-                  PreparedStatement stmt = tenantConn.prepareStatement(dml);
-                  stmt.setString(1, "00000000000000" + String.valueOf(i));
-                  stmt.setString(2, String.valueOf(i));
-                  assertEquals(1,stmt.executeUpdate());
-              }
+              // upsert one row
+              upsertRow(dml, tenantConn, 0);
+              tenantConn.commit();
+              ResultSet rs = tenantConn.createStatement().executeQuery("SELECT ENTITY_ID FROM " + fullTableName + " ORDER BY TYPE LIMIT 5");
+              assertTrue(rs.next());
+              // upsert two rows, which ends up using the tenant cache
+              upsertRow(dml, tenantConn, 1);
+              upsertRow(dml, tenantConn, 2);
               tenantConn.commit();
           }
       }
   }
+
+    private void upsertRow(String dml, Connection tenantConn, int i) throws SQLException {
+        PreparedStatement stmt = tenantConn.prepareStatement(dml);
+        stmt.setString(1, "00000000000000" + String.valueOf(i));
+        stmt.setString(2, String.valueOf(i));
+        assertEquals(1, stmt.executeUpdate());
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
index 7d04f5b..fe93eb5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
@@ -136,7 +136,7 @@ public class GlobalCache extends TenantCacheImpl {
      * @param tenantId the tenant ID or null if not applicable.
      * @return TenantCache
      */
-    public static TenantCache getTenantCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId) {
+    public static TenantCache getTenantCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId) {
         GlobalCache globalCache = GlobalCache.getInstance(env);
         TenantCache tenantCache = tenantId == null ? globalCache : globalCache.getChildTenantCache(tenantId);      
         return tenantCache;
@@ -165,7 +165,7 @@ public class GlobalCache extends TenantCacheImpl {
      * @param tenantId the ID that identifies the tenant
      * @return the existing or newly created TenantCache
      */
-    public TenantCache getChildTenantCache(ImmutableBytesWritable tenantId) {
+    public TenantCache getChildTenantCache(ImmutableBytesPtr tenantId) {
         TenantCache tenantCache = perTenantCacheMap.get(tenantId);
         if (tenantCache == null) {
             int maxTenantMemoryPerc = config.getInt(MAX_TENANT_MEMORY_PERC_ATTRIB, QueryServicesOptions.DEFAULT_MAX_TENANT_MEMORY_PERC);

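Narrowing the parameter from ImmutableBytesWritable to ImmutableBytesPtr matters because perTenantCacheMap is keyed by ImmutableBytesPtr; mixing the two types across puts and gets risks missed lookups. A simplified, self-contained sketch of the design (not Phoenix code; BytesKey stands in for ImmutableBytesPtr):

    import java.util.Arrays;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class BytesKey {
        private final byte[] bytes;
        private final int hash; // computed once, like ImmutableBytesPtr's cached hashCode

        BytesKey(byte[] bytes) {
            this.bytes = bytes.clone();
            this.hash = Arrays.hashCode(this.bytes);
        }

        @Override public int hashCode() { return hash; }

        @Override public boolean equals(Object o) {
            return o instanceof BytesKey && Arrays.equals(bytes, ((BytesKey) o).bytes);
        }
    }

    public class TenantCacheKeyDemo {
        public static void main(String[] args) {
            Map<BytesKey, String> perTenantCacheMap = new ConcurrentHashMap<>();
            perTenantCacheMap.put(new BytesKey("tenant1".getBytes()), "tenant1-cache");
            // Probing with the same key type always hits; that invariant is what
            // the signature change enforces at compile time.
            System.out.println(perTenantCacheMap.get(new BytesKey("tenant1".getBytes())));
        }
    }
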
http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index 0c76591..1200639 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -35,7 +35,6 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -126,7 +125,7 @@ public class SpillableGroupByCache implements GroupByCache {
      * @param aggs
      * @param ctxt
      */
-    public SpillableGroupByCache(final RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId,
+    public SpillableGroupByCache(final RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId,
             ServerAggregators aggs, final int estSizeNum) {
         totalNumElements = 0;
         this.aggregators = aggs;
@@ -217,7 +216,7 @@ public class SpillableGroupByCache implements GroupByCache {
      * implements an implicit put() of a new key/value tuple and loads it into the cache
      */
     @Override
-    public Aggregator[] cache(ImmutableBytesWritable cacheKey) {
+    public Aggregator[] cache(ImmutableBytesPtr cacheKey) {
         ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey);
         Aggregator[] rowAggregators = cache.get(key);
         if (rowAggregators == null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java
index 38c4ca0..68d07a9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java
@@ -19,10 +19,9 @@ package org.apache.phoenix.coprocessor;
 
 import java.io.Closeable;
 
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-
 import org.apache.phoenix.expression.aggregator.Aggregator;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 /**
  * 
@@ -34,6 +33,6 @@ import org.apache.phoenix.expression.aggregator.Aggregator;
  */
 public interface GroupByCache extends Closeable {
     long size();
-    Aggregator[] cache(ImmutableBytesWritable key);
+    Aggregator[] cache(ImmutableBytesPtr key);
     RegionScanner getScanner(RegionScanner s);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index cfe0e4a..49e3d71 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -137,7 +136,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
                 viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
             }
-            ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
+            ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
             innerScanner =
                     getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, 
                             c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
@@ -238,7 +237,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         
         private int estDistVals;
         
-        InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
+        InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
             int estValueSize = aggregators.getEstimatedByteSize();
             long estSize = sizeOfUnorderedGroupByMap(estDistVals, estValueSize);
             TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
@@ -256,7 +255,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         }
 
         @Override
-        public Aggregator[] cache(ImmutableBytesWritable cacheKey) {
+        public Aggregator[] cache(ImmutableBytesPtr cacheKey) {
             ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey);
             Aggregator[] rowAggregators = aggregateMap.get(key);
             if (rowAggregators == null) {
@@ -345,7 +344,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         private GroupByCacheFactory() {
         }
         
-        GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
+        GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
             Configuration conf = env.getConfiguration();
             boolean spillableEnabled =
                     conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
@@ -411,7 +410,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                         hasMore = scanner.nextRaw(results);
                         if (!results.isEmpty()) {
                             result.setKeyValues(results);
-                            ImmutableBytesWritable key =
+                            ImmutableBytesPtr key =
                                 TupleUtil.getConcatenatedValue(result, expressions);
                             Aggregator[] rowAggregators = groupByCache.cache(key);
                             // Aggregate values here
@@ -454,7 +453,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         }
         return new BaseRegionScanner(scanner) {
             private long rowCount = 0;
-            private ImmutableBytesWritable currentKey = null;
+            private ImmutableBytesPtr currentKey = null;
 
             @Override
             public boolean next(List<Cell> results) throws IOException {
@@ -462,7 +461,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 boolean atLimit;
                 boolean aggBoundary = false;
                 MultiKeyValueTuple result = new MultiKeyValueTuple();
-                ImmutableBytesWritable key = null;
+                ImmutableBytesPtr key = null;
                 Aggregator[] rowAggregators = aggregators.getAggregators();
                 // If we're calculating no aggregate functions, we can exit at the
                 // start of a new row. Otherwise, we have to wait until an agg

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 2650225..480ee6d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.cache.GlobalCache;
@@ -64,7 +63,7 @@ public class HashJoinRegionScanner implements RegionScanner {
     private ValueBitSet[] tempSrcBitSet;
     
     @SuppressWarnings("unchecked")
-    public HashJoinRegionScanner(RegionScanner scanner, TupleProjector projector, HashJoinInfo joinInfo, ImmutableBytesWritable tenantId, RegionCoprocessorEnvironment env) throws IOException {
+    public HashJoinRegionScanner(RegionScanner scanner, TupleProjector projector, HashJoinInfo joinInfo, ImmutableBytesPtr tenantId, RegionCoprocessorEnvironment env) throws IOException {
         this.env = env;
         this.scanner = scanner;
         this.projector = projector;
@@ -196,7 +195,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                 for (Iterator<Tuple> iter = resultQueue.iterator(); iter.hasNext();) {
                     Tuple t = iter.next();
                     postFilter.reset();
-                    ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
+                    ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
                     try {
                         if (!postFilter.evaluate(t, tempPtr)) {
                             iter.remove();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 457555e..3cfe790 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.TenantCache;
@@ -47,6 +46,7 @@ import org.apache.phoenix.expression.KeyValueColumnExpression;
 import org.apache.phoenix.expression.OrderByExpression;
 import org.apache.phoenix.expression.function.ArrayIndexFunction;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.iterate.OffsetResultIterator;
 import org.apache.phoenix.iterate.OrderedResultIterator;
@@ -64,12 +64,11 @@ import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.tephra.Transaction;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
-import org.apache.tephra.Transaction;
-
 
 /**
  *
@@ -224,7 +223,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                     dataColumns, tupleProjector, dataRegion, indexMaintainer, tx,
                     viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr);
 
-        final ImmutableBytesWritable tenantId = ScanUtil.getTenantId(scan);
+        final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan);
         if (j != null) {
             innerScanner = new HashJoinRegionScanner(innerScanner, p, j, tenantId, c.getEnvironment());
         }
@@ -310,7 +309,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
      *  getting the first Tuple (which forces running through the entire region)
      *  since after this everything is held in memory
      */
-    private RegionScanner getTopNScanner(final ObserverContext<RegionCoprocessorEnvironment> c, final RegionScanner s, final OrderedResultIterator iterator, ImmutableBytesWritable tenantId) throws Throwable {
+    private RegionScanner getTopNScanner(final ObserverContext<RegionCoprocessorEnvironment> c, final RegionScanner s, final OrderedResultIterator iterator, ImmutableBytesPtr tenantId) throws Throwable {
         final Tuple firstTuple;
         TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
         long estSize = iterator.getEstimatedByteSize();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
index 2679f1c..818713b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.IndexMetaDataCache;
 import org.apache.phoenix.cache.ServerCacheClient;
@@ -71,7 +70,7 @@ public class PhoenixIndexMetaData implements IndexMetaData {
             };
         } else {
             byte[] tenantIdBytes = attributes.get(PhoenixRuntime.TENANT_ID_ATTRIB);
-            ImmutableBytesWritable tenantId = tenantIdBytes == null ? null : new ImmutableBytesWritable(tenantIdBytes);
+            ImmutableBytesPtr tenantId = tenantIdBytes == null ? null : new ImmutableBytesPtr(tenantIdBytes);
             TenantCache cache = GlobalCache.getTenantCache(env, tenantId);
             IndexMetaDataCache indexCache = (IndexMetaDataCache)cache.getServerCache(new ImmutableBytesPtr(uuid));
             if (indexCache == null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/be72e538/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index d7f6f2f..b0e8a99 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -55,6 +55,7 @@ import org.apache.phoenix.execute.DescVarLengthFastByteComparisons;
 import org.apache.phoenix.filter.BooleanExpressionFilter;
 import org.apache.phoenix.filter.DistinctPrefixFilter;
 import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.KeyRange.Bound;
@@ -115,13 +116,13 @@ public class ScanUtil {
     // Use getTenantId and pass in column name to match against
     // in as PSchema attribute. If column name matches in 
     // KeyExpressions, set on scan as attribute
-    public static ImmutableBytesWritable getTenantId(Scan scan) {
+    public static ImmutableBytesPtr getTenantId(Scan scan) {
         // Create Scan with special aggregation column over which to aggregate
         byte[] tenantId = scan.getAttribute(PhoenixRuntime.TENANT_ID_ATTRIB);
         if (tenantId == null) {
             return null;
         }
-        return new ImmutableBytesWritable(tenantId);
+        return new ImmutableBytesPtr(tenantId);
     }
     
     public static void setCustomAnnotations(Scan scan, byte[] annotations) {

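Taken together, these changes make the tenant id flow through as a single type end to end. A sketch of the round trip, using only the signatures shown in the diffs above (coprocessor environment setup elided):

    Scan scan = new Scan();
    scan.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toBytes("tenant1"));
    ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan);        // null if the attribute is unset
    TenantCache cache = GlobalCache.getTenantCache(env, tenantId);  // falls back to the global cache on null
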

[11/16] phoenix git commit: PHOENIX-808 Create snapshot of SYSTEM.CATALOG prior to upgrade and restore on any failure

Posted by td...@apache.org.
PHOENIX-808 Create snapshot of SYSTEM.CATALOG prior to upgrade and restore on any failure


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b8ac187b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b8ac187b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b8ac187b

Branch: refs/heads/4.x-HBase-1.0
Commit: b8ac187b766ad6efb684f3a2640a9ec4dacdfe8a
Parents: 98e665f
Author: Samarth <sa...@salesforce.com>
Authored: Mon Aug 22 11:43:12 2016 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Mon Aug 22 11:43:12 2016 -0700

----------------------------------------------------------------------
 .../phoenix/coprocessor/MetaDataProtocol.java   |  19 +
 .../query/ConnectionQueryServicesImpl.java      | 370 ++++++++++++-------
 .../org/apache/phoenix/util/UpgradeUtil.java    |  11 +
 3 files changed, 274 insertions(+), 126 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8ac187b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 8982fe7..dce89bd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -19,7 +19,9 @@ package org.apache.phoenix.coprocessor;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
@@ -27,6 +29,7 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.coprocessor.generated.PFunctionProtos;
 import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.schema.PColumn;
@@ -83,6 +86,22 @@ public abstract class MetaDataProtocol extends MetaDataService {
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 = MIN_TABLE_TIMESTAMP + 18;
     // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
     public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0;
+    
+    // ALWAYS update this map whenever rolling out a new release (major, minor or patch release). 
+    // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
+    public static final Map<Long, String> TIMESTAMP_VERSION_MAP = new HashMap<>(10);
+    static {
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0, "4.1.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0, "4.2.0");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1, "4.2.1");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, "4.3.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, "4.5.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, "4.6.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, "4.7.x");
+        TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, "4.8.x");
+    }
+    public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER; 
+    
     // TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, nor should we need
     // a different code for every type of error.
     // ENTITY_ALREADY_EXISTS, ENTITY_NOT_FOUND, NEWER_ENTITY_FOUND, ENTITY_NOT_IN_REGION, CONCURRENT_MODIFICATION

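A sketch of how the new map and constant can be combined when reporting an upgrade mismatch; the log message is illustrative, not taken from the patch:

    long serverTimestamp = e.getTable().getTimeStamp(); // as read in ConnectionQueryServicesImpl
    String serverVersion = MetaDataProtocol.TIMESTAMP_VERSION_MAP.get(serverTimestamp); // null for unmapped timestamps
    logger.info("SYSTEM.CATALOG is at " + (serverVersion == null ? "an unknown version" : serverVersion)
            + ", client is " + MetaDataProtocol.CURRENT_CLIENT_VERSION);
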
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8ac187b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 23f6964..d55e5ca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -18,6 +18,14 @@
 package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
+import static org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
+import static org.apache.phoenix.util.UpgradeUtil.getUpgradeSnapshotName;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_SEQUENCE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
+
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_PATCH_NUMBER;
@@ -2298,10 +2306,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         }
                         checkClosed();
                         PhoenixConnection metaConnection = null;
+                        boolean success = false;
+                        String snapshotName = null;
+                        String sysCatalogTableName = null;
                         try {
                             openConnection();
                             String noUpgradeProp = props.getProperty(PhoenixRuntime.NO_UPGRADE_ATTRIB);
-                            boolean upgradeSystemTables = !Boolean.TRUE.equals(Boolean.valueOf(noUpgradeProp));
+                            boolean upgradeSystemTables = !Boolean.TRUE.equals(Boolean.valueOf(noUpgradeProp)); 
                             Properties scnProps = PropertiesUtil.deepCopy(props);
                             scnProps.setProperty(
                                 PhoenixRuntime.CURRENT_SCN_ATTRIB,
@@ -2328,148 +2339,150 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                             + " is found but client does not have "
                                             + IS_NAMESPACE_MAPPING_ENABLED + " enabled")
                                             .build().buildException(); }
-                            }
-
+                            }   
                             try {
                                 metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
-
                             } catch (NewerTableAlreadyExistsException ignore) {
                                 // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed timestamp.
                                 // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp.
                             } catch (TableAlreadyExistsException e) {
                                 if (upgradeSystemTables) {
-                                    // This will occur if we have an older SYSTEM.CATALOG and we need to update it to include
-                                    // any new columns we've added.
                                     long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
-
-                                    String columnsToAdd = "";
-                                    if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
-                                        // We know that we always need to add the STORE_NULLS column for 4.3 release
-                                        columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName());
-                                        try (HBaseAdmin admin = getAdmin()) {
-                                            HTableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
-                                            for (HTableDescriptor table : localIndexTables) {
-                                                if (table.getValue(MetaDataUtil.PARENT_TABLE_KEY) == null
-                                                        && table.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME) != null) {
-                                                    table.setValue(MetaDataUtil.PARENT_TABLE_KEY,
-                                                        MetaDataUtil.getUserTableName(table
-                                                            .getNameAsString()));
-                                                    // Explicitly disable, modify and enable the table to ensure co-location of data
-                                                    // and index regions. If we just modify the table descriptor when online schema
-                                                    // change enabled may reopen the region in same region server instead of following data region.
-                                                    admin.disableTable(table.getTableName());
-                                                    admin.modifyTable(table.getTableName(), table);
-                                                    admin.enableTable(table.getTableName());
+                                    sysCatalogTableName = e.getTable().getPhysicalName().getString();
+                                    if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) {
+                                        snapshotName = getUpgradeSnapshotName(sysCatalogTableName, currentServerSideTableTimeStamp);
+                                        createSnapshot(snapshotName, sysCatalogTableName);
+                                    }
+                                        String columnsToAdd = "";
+                                        // This will occur if we have an older SYSTEM.CATALOG and we need to update it to include
+                                        // any new columns we've added.
+                                        if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
+                                            // We know that we always need to add the STORE_NULLS column for 4.3 release
+                                            columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName());
+                                            try (HBaseAdmin admin = getAdmin()) {
+                                                HTableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
+                                                for (HTableDescriptor table : localIndexTables) {
+                                                    if (table.getValue(MetaDataUtil.PARENT_TABLE_KEY) == null
+                                                            && table.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME) != null) {
+                                                        table.setValue(MetaDataUtil.PARENT_TABLE_KEY,
+                                                            MetaDataUtil.getUserTableName(table
+                                                                .getNameAsString()));
+                                                        // Explicitly disable, modify and enable the table to ensure co-location of data
+                                                        // and index regions. If we just modify the table descriptor when online schema
+                                                        // change enabled may reopen the region in same region server instead of following data region.
+                                                        admin.disableTable(table.getTableName());
+                                                        admin.modifyTable(table.getTableName(), table);
+                                                        admin.enableTable(table.getTableName());
+                                                    }
                                                 }
                                             }
                                         }
-                                    }
-
-                                    // If the server side schema is before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then
-                                    // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too. 
-                                    // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed, 
-                                    // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all 
-                                    // the column names that have been added to SYSTEM.CATALOG since 4.0. 
-                                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
-                                        columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName()
-                                            + ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName());
-                                    }
 
-                                    // If we have some new columns from 4.1-4.3 to add, add them now.
-                                    if (!columnsToAdd.isEmpty()) {
-                                        // Ugh..need to assign to another local variable to keep eclipse happy.
-                                        PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
-                                            PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, columnsToAdd);
-                                        metaConnection = newMetaConnection;
-                                    }
+                                        // If the server side schema is before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then
+                                        // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too. 
+                                        // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed, 
+                                        // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all 
+                                        // the column names that have been added to SYSTEM.CATALOG since 4.0. 
+                                        if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
+                                            columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName()
+                                                + ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName());
+                                        }
 
-                                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0) {
-                                        columnsToAdd = PhoenixDatabaseMetaData.BASE_COLUMN_COUNT + " "
-                                                + PInteger.INSTANCE.getSqlTypeName();
-                                        try {
-                                            metaConnection = addColumn(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, columnsToAdd, false);
-                                            upgradeTo4_5_0(metaConnection);
-                                        } catch (ColumnAlreadyExistsException ignored) {
-                                            /* 
-                                             * Upgrade to 4.5 is a slightly special case. We use the fact that the column
-                                             * BASE_COLUMN_COUNT is already part of the meta-data schema as the signal that
-                                             * the server side upgrade has finished or is in progress.
-                                             */
-                                            logger.debug("No need to run 4.5 upgrade");
+                                        // If we have some new columns from 4.1-4.3 to add, add them now.
+                                        if (!columnsToAdd.isEmpty()) {
+                                            // Ugh..need to assign to another local variable to keep eclipse happy.
+                                            PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
+                                                PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, columnsToAdd);
+                                            metaConnection = newMetaConnection;
                                         }
-                                        Properties props = PropertiesUtil.deepCopy(metaConnection.getClientInfo());
-                                        props.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB);
-                                        props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
-                                        PhoenixConnection conn = new PhoenixConnection(ConnectionQueryServicesImpl.this, metaConnection.getURL(), props, metaConnection.getMetaDataCache());
-                                        try {
-                                            List<String> tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn);
-                                            if (!tablesNeedingUpgrade.isEmpty()) {
-                                                logger.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n" + Joiner.on(' ').join(tablesNeedingUpgrade) + "\nTo upgrade issue the \"bin/psql.py -u\" command.");
+
+                                        if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0) {
+                                            columnsToAdd = PhoenixDatabaseMetaData.BASE_COLUMN_COUNT + " "
+                                                    + PInteger.INSTANCE.getSqlTypeName();
+                                            try {
+                                                metaConnection = addColumn(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                                    MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, columnsToAdd, false);
+                                                upgradeTo4_5_0(metaConnection);
+                                            } catch (ColumnAlreadyExistsException ignored) {
+                                                /* 
+                                                 * Upgrade to 4.5 is a slightly special case. We use the fact that the column
+                                                 * BASE_COLUMN_COUNT is already part of the meta-data schema as the signal that
+                                                 * the server side upgrade has finished or is in progress.
+                                                 */
+                                                logger.debug("No need to run 4.5 upgrade");
                                             }
-                                            List<String> unsupportedTables = UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn);
-                                            if (!unsupportedTables.isEmpty()) {
-                                                logger.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n" + Joiner.on(' ').join(unsupportedTables));
+                                            Properties props = PropertiesUtil.deepCopy(metaConnection.getClientInfo());
+                                            props.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB);
+                                            props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
+                                            PhoenixConnection conn = new PhoenixConnection(ConnectionQueryServicesImpl.this, metaConnection.getURL(), props, metaConnection.getMetaDataCache());
+                                            try {
+                                                List<String> tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn);
+                                                if (!tablesNeedingUpgrade.isEmpty()) {
+                                                    logger.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n" + Joiner.on(' ').join(tablesNeedingUpgrade) + "\nTo upgrade issue the \"bin/psql.py -u\" command.");
+                                                }
+                                                List<String> unsupportedTables = UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn);
+                                                if (!unsupportedTables.isEmpty()) {
+                                                    logger.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n" + Joiner.on(' ').join(unsupportedTables));
+                                                }
+                                            } catch (Exception ex) {
+                                                logger.error("Unable to determine tables requiring upgrade due to PHOENIX-2067", ex);
+                                            } finally {
+                                                conn.close();
                                             }
-                                        } catch (Exception ex) {
-                                            logger.error("Unable to determine tables requiring upgrade due to PHOENIX-2067", ex);
-                                        } finally {
-                                            conn.close();
                                         }
-                                    }
-                                    // Add these columns one at a time, each with different timestamps so that if folks have
-                                    // run the upgrade code already for a snapshot, we'll still enter this block (and do the
-                                    // parts we haven't yet done).
-                                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0) {
-                                        columnsToAdd = PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP + " " + PBoolean.INSTANCE.getSqlTypeName();
-                                        metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd);
-                                    }
-                                    if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
-                                        // Drop old stats table so that new stats table is created
-                                        metaConnection = dropStatsTable(metaConnection, 
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4);
-                                        metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3,
-                                            PhoenixDatabaseMetaData.TRANSACTIONAL + " " + PBoolean.INSTANCE.getSqlTypeName());
-                                        metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
-                                            PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + PLong.INSTANCE.getSqlTypeName());
-                                        metaConnection = setImmutableTableIndexesImmutable(metaConnection, 
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
-                                        metaConnection = updateSystemCatalogTimestamp(metaConnection, 
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
-                                        ConnectionQueryServicesImpl.this.removeTable(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
-                                        clearCache();
-                                    }
+                                        // Add these columns one at a time, each with different timestamps so that if folks have
+                                        // run the upgrade code already for a snapshot, we'll still enter this block (and do the
+                                        // parts we haven't yet done).
+                                        if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0) {
+                                            columnsToAdd = PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP + " " + PBoolean.INSTANCE.getSqlTypeName();
+                                            metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd);
+                                        }
+                                        if(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
+                                            // Drop old stats table so that new stats table is created
+                                            metaConnection = dropStatsTable(metaConnection, 
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4);
+                                            metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3,
+                                                PhoenixDatabaseMetaData.TRANSACTIONAL + " " + PBoolean.INSTANCE.getSqlTypeName());
+                                            metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, 
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2,
+                                                PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + PLong.INSTANCE.getSqlTypeName());
+                                            metaConnection = setImmutableTableIndexesImmutable(metaConnection, 
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1);
+                                            metaConnection = updateSystemCatalogTimestamp(metaConnection, 
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
+                                            ConnectionQueryServicesImpl.this.removeTable(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0);
+                                            clearCache();
+                                        }
 
-                                    if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0) {
-                                        metaConnection = addColumnsIfNotExists(metaConnection,
-                                            PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 2,
-                                            PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED + " "
-                                                    + PBoolean.INSTANCE.getSqlTypeName());
-                                        metaConnection = addColumnsIfNotExists(metaConnection,
-                                            PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 1,
-                                            PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ + " "
-                                                    + PVarchar.INSTANCE.getSqlTypeName());
-                                        metaConnection = addColumnsIfNotExists(metaConnection,
-                                            PhoenixDatabaseMetaData.SYSTEM_CATALOG,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0,
-                                            PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA + " "
-                                                    + PBoolean.INSTANCE.getSqlTypeName());
-                                        metaConnection = UpgradeUtil.disableViewIndexes(metaConnection);
-                                        if(getProps().getBoolean(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB,
-                                            QueryServicesOptions.DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE)) {
-                                            metaConnection = UpgradeUtil.upgradeLocalIndexes(metaConnection);
+                                        if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0) {
+                                            metaConnection = addColumnsIfNotExists(metaConnection,
+                                                PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 2,
+                                                PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED + " "
+                                                        + PBoolean.INSTANCE.getSqlTypeName());
+                                            metaConnection = addColumnsIfNotExists(metaConnection,
+                                                PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 1,
+                                                PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ + " "
+                                                        + PVarchar.INSTANCE.getSqlTypeName());
+                                            metaConnection = addColumnsIfNotExists(metaConnection,
+                                                PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0,
+                                                PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA + " "
+                                                        + PBoolean.INSTANCE.getSqlTypeName());
+                                            metaConnection = UpgradeUtil.disableViewIndexes(metaConnection);
+                                            if(getProps().getBoolean(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB,
+                                                QueryServicesOptions.DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE)) {
+                                                metaConnection = UpgradeUtil.upgradeLocalIndexes(metaConnection);
+                                            }
+                                            ConnectionQueryServicesImpl.this.removeTable(null,
+                                                PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null,
+                                                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0);
+                                            clearCache();
                                         }
-                                        ConnectionQueryServicesImpl.this.removeTable(null,
-                                            PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null,
-                                            MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0);
-                                        clearCache();
-                                    }
                                 }
                             }
 
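A note on the hunk above: each upgrade step is written at its own timestamp just below the target MIN_SYSTEM_TABLE_TIMESTAMP, which is what makes the block safe to re-run after a partial failure. Below is a minimal, self-contained sketch of that idempotence; stepIfMissing() and the numeric timestamps are illustrative stand-ins, not Phoenix APIs.

    // Sketch: timestamp-staggered upgrade steps that a re-run can resume.
    // All names and values here are made up for illustration.
    import java.util.HashSet;
    import java.util.Set;

    public class StaggeredUpgradeSketch {
        static final long TARGET = 7; // stands in for MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0

        // Runs one step only if no prior run already committed at this timestamp.
        static void stepIfMissing(Set<Long> applied, long ts, String what) {
            if (applied.add(ts)) {
                System.out.println("applying at ts=" + ts + ": " + what);
            }
        }

        public static void main(String[] args) {
            Set<Long> applied = new HashSet<>();
            applied.add(TARGET - 4); // simulate a partially completed earlier upgrade
            stepIfMissing(applied, TARGET - 4, "drop old stats table");        // skipped
            stepIfMissing(applied, TARGET - 3, "add TRANSACTIONAL column");
            stepIfMissing(applied, TARGET - 2, "add UPDATE_CACHE_FREQUENCY column");
            stepIfMissing(applied, TARGET - 1, "make immutable-table indexes immutable");
            stepIfMissing(applied, TARGET,     "bump SYSTEM.CATALOG timestamp");
        }
    }
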
@@ -2527,7 +2540,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                     if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
                                         metaConnection = addColumnsIfNotExists(
                                             metaConnection,
-                                            PhoenixDatabaseMetaData.SYSTEM_STATS_NAME,
+                                            SYSTEM_STATS_NAME,
                                             MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
                                             PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT + " "
                                                     + PLong.INSTANCE.getSqlTypeName());
@@ -2547,7 +2560,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                             + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA);
                                 } catch (NewerSchemaAlreadyExistsException e) {}
                             }
-                            scheduleRenewLeaseTasks(); 
+                            success = true;
+                            scheduleRenewLeaseTasks();
                         } catch (Exception e) {
                             if (e instanceof SQLException) {
                                 initializationException = (SQLException)e;
@@ -2566,6 +2580,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 }
                             } finally {
                                 try {
+                                    restoreFromSnapshot(sysCatalogTableName, snapshotName, success);
+                                } catch (SQLException e) {
+                                    if (initializationException != null) {
+                                        initializationException.setNextException(e);
+                                    } else {
+                                        initializationException = e;
+                                    }
+                                }
+                                try {
                                     if (initializationException != null) {
                                         throw initializationException;
                                     }
@@ -2578,6 +2601,101 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     return null;
                 }
 
+                private void createSnapshot(String snapshotName, String tableName)
+                        throws SQLException {
+                    HBaseAdmin admin = null;
+                    SQLException sqlE = null;
+                    try {
+                        admin = getAdmin();
+                        admin.snapshot(snapshotName, tableName);
+                        logger.info("Successfully created snapshot " + snapshotName + " for "
+                                + tableName);
+                    } catch (Exception e) {
+                        sqlE = new SQLException(e);
+                    } finally {
+                        try {
+                            if (admin != null) {
+                                admin.close();
+                            }
+                        } catch (Exception e) {
+                            SQLException adminCloseEx = new SQLException(e);
+                            if (sqlE == null) {
+                                sqlE = adminCloseEx;
+                            } else {
+                                sqlE.setNextException(adminCloseEx);
+                            }
+                        } finally {
+                            if (sqlE != null) {
+                                throw sqlE;
+                            }
+                        }
+                    }
+                }
+
+                private void restoreFromSnapshot(String tableName, String snapshotName,
+                        boolean success) throws SQLException {
+                    boolean snapshotRestored = false;
+                    boolean tableDisabled = false;
+                    if (!success && snapshotName != null) {
+                        SQLException sqlE = null;
+                        HBaseAdmin admin = null;
+                        try {
+                            logger.warn("Starting restore of " + tableName + " using snapshot "
+                                    + snapshotName + " because upgrade failed");
+                            admin = getAdmin();
+                            admin.disableTable(tableName);
+                            tableDisabled = true;
+                            admin.restoreSnapshot(snapshotName);
+                            snapshotRestored = true;
+                            logger.warn("Successfully restored " + tableName + " using snapshot "
+                                    + snapshotName);
+                        } catch (Exception e) {
+                            sqlE = new SQLException(e);
+                        } finally {
+                            if (admin != null && tableDisabled) {
+                                try {
+                                    admin.enableTable(tableName);
+                                    if (snapshotRestored) {
+                                        logger.warn("Successfully restored and enabled " + tableName + " using snapshot "
+                                                + snapshotName);
+                                    } else {
+                                        logger.warn("Successfully enabled " + tableName + " after restoring using snapshot "
+                                                + snapshotName + " failed. ");
+                                    }
+                                } catch (Exception e1) {
+                                    SQLException enableTableEx = new SQLException(e1);
+                                    if (sqlE == null) {
+                                        sqlE = enableTableEx;
+                                    } else {
+                                        sqlE.setNextException(enableTableEx);
+                                    }
+                                    logger.error("Failure in enabling "
+                                            + tableName
+                                            + (snapshotRestored ? " after successfully restoring using snapshot"
+                                                    + snapshotName
+                                                    : " after restoring using snapshot "
+                                                            + snapshotName + " failed. "));
+                                } finally {
+                                    try {
+                                        admin.close();
+                                    } catch (Exception e2) {
+                                        SQLException adminCloseEx = new SQLException(e2);
+                                        if (sqlE == null) {
+                                            sqlE = adminCloseEx;
+                                        } else {
+                                            sqlE.setNextException(adminCloseEx);
+                                        }
+                                    } finally {
+                                        if (sqlE != null) {
+                                            throw sqlE;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+                
                 private void ensureSystemTablesUpgraded(ReadOnlyProps props)
                         throws SQLException, IOException, IllegalArgumentException, InterruptedException {
                     if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props)) { return; }

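The restoreFromSnapshot() method above only acts when the upgrade failed and a snapshot name was recorded, and it deliberately disables the table before restoring because HBase cannot restore a snapshot onto an enabled table. A trimmed sketch of that ordering, using the same HBase 1.x HBaseAdmin API as the patch (the exception chaining and logging of the real method are omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class RestoreOrderingSketch {
        // Disable -> restoreSnapshot -> enable, mirroring restoreFromSnapshot().
        public static void restore(String tableName, String snapshotName) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            HBaseAdmin admin = new HBaseAdmin(conf);
            try {
                admin.disableTable(tableName);       // restore requires a disabled table
                admin.restoreSnapshot(snapshotName); // roll the table back
                admin.enableTable(tableName);        // bring it back online
            } finally {
                admin.close();
            }
        }
    }
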
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8ac187b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index d7ed01a..51a8139 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.util;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.phoenix.coprocessor.MetaDataProtocol.CURRENT_CLIENT_VERSION;
+import static org.apache.phoenix.coprocessor.MetaDataProtocol.TIMESTAMP_VERSION_MAP;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
@@ -51,10 +53,13 @@ import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_
 
 import java.io.IOException;
 import java.sql.Connection;
+import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.text.Format;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -1888,4 +1893,10 @@ public class UpgradeUtil {
         }
     }
 
+    public static final String getUpgradeSnapshotName(String tableString, long currentSystemTableTimestamp) {
+        Format formatter = new SimpleDateFormat("yyyyMMddHHmmssZ");
+        String date = formatter.format(new Date(System.currentTimeMillis()));
+        String upgradingFrom = TIMESTAMP_VERSION_MAP.get(currentSystemTableTimestamp);
+        return "SNAPSHOT_" + tableString + "_" + upgradingFrom + "_TO_" + CURRENT_CLIENT_VERSION + "_" + date;
+    }
 }
\ No newline at end of file

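For reference, getUpgradeSnapshotName() above simply concatenates the table name, the version being upgraded from, the current client version, and a timestamp. A self-contained sketch of the resulting name; the "4.7.x" and "4.9.0" version strings are assumptions for illustration, not values read from TIMESTAMP_VERSION_MAP:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class SnapshotNameSketch {
        public static void main(String[] args) {
            String table = "SYSTEM.CATALOG";
            String upgradingFrom = "4.7.x";  // assumed TIMESTAMP_VERSION_MAP value
            String currentVersion = "4.9.0"; // assumed CURRENT_CLIENT_VERSION
            String date = new SimpleDateFormat("yyyyMMddHHmmssZ").format(new Date());
            // e.g. SNAPSHOT_SYSTEM.CATALOG_4.7.x_TO_4.9.0_20160826213659-0700
            System.out.println("SNAPSHOT_" + table + "_" + upgradingFrom
                    + "_TO_" + currentVersion + "_" + date);
        }
    }
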

[15/16] phoenix git commit: PHOENIX-3148 Reduce size of PTable so that more tables can be cached in the metadata cache

Posted by td...@apache.org.
PHOENIX-3148 Reduce size of PTable so that more tables can be cached in the metadata cache


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b6c31ef5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b6c31ef5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b6c31ef5

Branch: refs/heads/4.x-HBase-1.0
Commit: b6c31ef5234d963c3eca15fa3ee346f625d4be29
Parents: f2e0ab2
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Fri Aug 19 16:39:28 2016 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Tue Aug 23 22:36:51 2016 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/IndexToolIT.java |   1 -
 .../apache/phoenix/execute/MutationState.java   |   6 +-
 .../query/ConnectionQueryServicesImpl.java      |   5 +-
 .../query/ConnectionlessQueryServicesImpl.java  |   4 +-
 .../org/apache/phoenix/query/QueryServices.java |   2 +
 .../phoenix/query/QueryServicesOptions.java     |   3 +
 .../apache/phoenix/schema/MetaDataClient.java   |  25 +-
 .../apache/phoenix/schema/PMetaDataCache.java   | 221 +++++++++++++++
 .../apache/phoenix/schema/PMetaDataImpl.java    | 268 +++----------------
 .../org/apache/phoenix/schema/PTableImpl.java   |  29 +-
 .../org/apache/phoenix/schema/PTableRef.java    |  56 ++--
 .../apache/phoenix/schema/PTableRefFactory.java |  52 ++++
 .../apache/phoenix/schema/PTableRefImpl.java    |  39 +++
 .../phoenix/schema/SerializedPTableRef.java     |  47 ++++
 .../schema/SerializedPTableRefFactory.java      |  37 +++
 .../phoenix/schema/PMetaDataImplTest.java       |  34 ++-
 16 files changed, 532 insertions(+), 297 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
index c66fea3..16db876 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
@@ -37,7 +37,6 @@ import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.mapreduce.index.IndexTool;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.PropertiesUtil;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 38d24aa..359565a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -681,7 +681,7 @@ public class MutationState implements SQLCloseable {
             return Iterators.emptyIterator();
         }
         Long scn = connection.getSCN();
-        final long timestamp = (tableTimestamp!=null && tableTimestamp!=QueryConstants.UNSET_TIMESTAMP) ? tableTimestamp : (scn == null ? HConstants.LATEST_TIMESTAMP : scn);
+        final long timestamp = getMutationTimestamp(tableTimestamp, scn);
         return new Iterator<Pair<byte[],List<Mutation>>>() {
             private Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> current = iterator.next();
             private Iterator<Pair<byte[],List<Mutation>>> innerIterator = init();
@@ -727,6 +727,10 @@ public class MutationState implements SQLCloseable {
             
         };
     }
+
+    public static long getMutationTimestamp(final Long tableTimestamp, Long scn) {
+        return (tableTimestamp!=null && tableTimestamp!=QueryConstants.UNSET_TIMESTAMP) ? tableTimestamp : (scn == null ? HConstants.LATEST_TIMESTAMP : scn);
+    }
         
     /**
      * Validates that the meta data is valid against the server meta data if we haven't yet done so.

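The extracted getMutationTimestamp() centralizes a three-way precedence: an explicit table timestamp wins, otherwise the connection SCN, otherwise HConstants.LATEST_TIMESTAMP. A self-contained sketch of that rule; the UNSET and LATEST constants are stand-ins for QueryConstants.UNSET_TIMESTAMP and HConstants.LATEST_TIMESTAMP:

    public class MutationTimestampSketch {
        static final long UNSET = -1L;             // stand-in for UNSET_TIMESTAMP
        static final long LATEST = Long.MAX_VALUE; // stand-in for LATEST_TIMESTAMP

        static long mutationTimestamp(Long tableTimestamp, Long scn) {
            return (tableTimestamp != null && tableTimestamp != UNSET)
                    ? tableTimestamp
                    : (scn == null ? LATEST : scn);
        }

        public static void main(String[] args) {
            System.out.println(mutationTimestamp(1000L, 2000L)); // 1000: table timestamp wins
            System.out.println(mutationTimestamp(-1L, 2000L));   // 2000: falls back to SCN
            System.out.println(mutationTimestamp(null, null));   // LATEST_TIMESTAMP
        }
    }
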
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d55e5ca..967870c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -35,6 +35,7 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS;
+import static org.apache.phoenix.util.UpgradeUtil.getUpgradeSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
@@ -290,9 +291,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
              });
     
     private PMetaData newEmptyMetaData() {
-        long maxSizeBytes = props.getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
-                QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
-        return new PSynchronizedMetaData(new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes));
+        return new PSynchronizedMetaData(new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, getProps()));
     }
     
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 25aca74..560b5d9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -144,9 +144,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     private PMetaData newEmptyMetaData() {
-        long maxSizeBytes = getProps().getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
-                QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
-        return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes);
+        return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, getProps());
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 42f954a..dfe9fb7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -223,6 +223,8 @@ public interface QueryServices extends SQLCloseable {
     public static final String LIMITED_QUERY_SERIAL_THRESHOLD = "phoenix.limited.query.serial.threshold";
 
     public static final String INDEX_ASYNC_BUILD_ENABLED = "phoenix.index.async.build.enabled";
+    
+    public static final String CLIENT_CACHE_ENCODING = "phoenix.table.client.cache.encoding";
     /**
      * Get executor service used for parallel scans
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 70b85db..73e6073 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.phoenix.schema.SaltingUtil;
+import org.apache.phoenix.schema.PTableRefFactory;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -256,6 +257,8 @@ public class QueryServicesOptions {
     public static final float DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD = 0.2f;
     
     public static final boolean DEFAULT_INDEX_ASYNC_BUILD_ENABLED = true;
+    
+    public static final String DEFAULT_CLIENT_CACHE_ENCODING = PTableRefFactory.Encoding.OBJECT.toString();
 
     @SuppressWarnings("serial")
     public static final Set<String> DEFAULT_QUERY_SERVER_SKIP_WORDS = new HashSet<String>() {

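The new phoenix.table.client.cache.encoding property selects which PTableRefFactory encoding the client cache uses, defaulting to OBJECT per DEFAULT_CLIENT_CACHE_ENCODING above. A sketch of setting it through connection properties; only the "OBJECT" value is confirmed by this diff, and the JDBC URL is an illustrative localhost example:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class CacheEncodingSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("phoenix.table.client.cache.encoding", "OBJECT");
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                System.out.println("connected with OBJECT cache encoding");
            }
        }
    }
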
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 7f97f4a..efe60ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -3147,7 +3147,7 @@ public class MetaDataClient {
         }
     }
 
-    private String dropColumnMutations(PTable table, List<PColumn> columnsToDrop, List<Mutation> tableMetaData) throws SQLException {
+    private String dropColumnMutations(PTable table, List<PColumn> columnsToDrop) throws SQLException {
         String tenantId = connection.getTenantId() == null ? "" : connection.getTenantId().getString();
         String schemaName = table.getSchemaName().getString();
         String tableName = table.getTableName().getString();
@@ -3263,7 +3263,9 @@ public class MetaDataClient {
                     columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
                 }
 
-                dropColumnMutations(table, tableColumnsToDrop, tableMetaData);
+                dropColumnMutations(table, tableColumnsToDrop);
+                boolean removedIndexTableOrColumn = false;
+                Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
                 for (PTable index : table.getIndexes()) {
                     IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                     // get the columns required for the index pk
@@ -3278,6 +3280,7 @@ public class MetaDataClient {
                             if (index.getViewIndexId()==null) 
                                 indexesToDrop.add(new TableRef(index));
                             connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
+                            removedIndexTableOrColumn = true;
                         } 
                         else if (coveredColumns.contains(columnToDropRef)) {
                             String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
@@ -3285,15 +3288,18 @@ public class MetaDataClient {
                             indexColumnsToDrop.add(indexColumn);
                             // add the index column to be dropped so that we actually delete the column values
                             columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
+                            removedIndexTableOrColumn = true;
                         }
                     }
                     if(!indexColumnsToDrop.isEmpty()) {
-                        incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
-                        dropColumnMutations(index, indexColumnsToDrop, tableMetaData);
+                        long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
+                        dropColumnMutations(index, indexColumnsToDrop);
+                        long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
+                        connection.removeColumn(tenantId, index.getName().getString(),
+                            indexColumnsToDrop, clientTimestamp, indexTableSeqNum,
+                            TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
                     }
-
                 }
-                Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
                 tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                 connection.rollback();
 
@@ -3348,8 +3354,11 @@ public class MetaDataClient {
                     // If we've done any index metadata updates, don't bother trying to update
                     // client-side cache as it would be too painful. Just let it pull it over from
                     // the server when needed.
-                    if (tableColumnsToDrop.size() > 0 && indexesToDrop.isEmpty()) {
-                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName) , tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
+                    if (tableColumnsToDrop.size() > 0) {
+                        if (removedIndexTableOrColumn)
+                            connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
+                        else
+                            connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
                     }
                     // If we have a VIEW, then only delete the metadata, and leave the table data alone
                     if (table.getType() != PTableType.VIEW) {

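The removedIndexTableOrColumn flag introduced above trades precision for safety: if any index table or index column went away, the whole cached table entry is evicted and re-resolved from the server, instead of patching the cached copy column by column. A minimal sketch of that decision; ClientCache and its methods are illustrative stand-ins for the PhoenixConnection removeTable/removeColumn calls:

    import java.util.Arrays;
    import java.util.List;

    public class DropColumnCacheSketch {
        interface ClientCache {
            void removeTable(String table);
            void removeColumns(String table, List<String> columns);
        }

        static void invalidateAfterDrop(boolean removedIndexTableOrColumn,
                ClientCache cache, String table, List<String> dropped) {
            if (removedIndexTableOrColumn) {
                cache.removeTable(table);            // index metadata changed: evict whole entry
            } else {
                cache.removeColumns(table, dropped); // base columns only: patch in place
            }
        }

        public static void main(String[] args) {
            ClientCache cache = new ClientCache() {
                public void removeTable(String t) { System.out.println("evict table " + t); }
                public void removeColumns(String t, List<String> c) { System.out.println("evict " + c + " of " + t); }
            };
            invalidateAfterDrop(true, cache, "T", Arrays.asList("C1"));
            invalidateAfterDrop(false, cache, "T", Arrays.asList("C1"));
        }
    }
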
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java
new file mode 100644
index 0000000..9992adb
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
+import org.apache.phoenix.util.TimeKeeper;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.MinMaxPriorityQueue;
+import com.google.common.primitives.Longs;
+
+class PMetaDataCache implements Cloneable {
+    private static final int MIN_REMOVAL_SIZE = 3;
+    private static final Comparator<PTableRef> COMPARATOR = new Comparator<PTableRef>() {
+        @Override
+        public int compare(PTableRef tableRef1, PTableRef tableRef2) {
+            return Longs.compare(tableRef1.getLastAccessTime(), tableRef2.getLastAccessTime());
+        }
+    };
+    private static final MinMaxPriorityQueue.Builder<PTableRef> BUILDER = MinMaxPriorityQueue.orderedBy(COMPARATOR);
+    
+    private long currentByteSize;
+    private final long maxByteSize;
+    private final int expectedCapacity;
+    private final TimeKeeper timeKeeper;
+    private final PTableRefFactory tableRefFactory;
+
+    private final Map<PTableKey,PTableRef> tables;
+    final Map<PTableKey,PFunction> functions;
+    final Map<PTableKey,PSchema> schemas;
+    
+    private static Map<PTableKey,PTableRef> newMap(int expectedCapacity) {
+        // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
+        // safely across multiple threads (as the underlying collection is not thread safe).
+        // Instead, we track access time and prune it based on the copy we've made.
+        return Maps.newHashMapWithExpectedSize(expectedCapacity);
+    }
+
+    private static Map<PTableKey,PFunction> newFunctionMap(int expectedCapacity) {
+        // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
+        // safely across multiple threads (as the underlying collection is not thread safe).
+        // Instead, we track access time and prune it based on the copy we've made.
+        return Maps.newHashMapWithExpectedSize(expectedCapacity);
+    }
+
+    private static Map<PTableKey,PSchema> newSchemaMap(int expectedCapacity) {
+        // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
+        // safely across multiple threads (as the underlying collection is not thread safe).
+        // Instead, we track access time and prune it based on the copy we've made.
+        return Maps.newHashMapWithExpectedSize(expectedCapacity);
+    }
+
+    private Map<PTableKey,PTableRef> cloneMap(Map<PTableKey,PTableRef> tables, int expectedCapacity) {
+        Map<PTableKey,PTableRef> newTables = newMap(Math.max(tables.size(),expectedCapacity));
+        // Copy value so that access time isn't changing anymore
+        for (PTableRef tableAccess : tables.values()) {
+            newTables.put(tableAccess.getTable().getKey(), tableRefFactory.makePTableRef(tableAccess));
+        }
+        return newTables;
+    }
+
+    private static Map<PTableKey, PSchema> cloneSchemaMap(Map<PTableKey, PSchema> schemas, int expectedCapacity) {
+        Map<PTableKey, PSchema> newSchemas = newSchemaMap(Math.max(schemas.size(), expectedCapacity));
+        // Copy value so that access time isn't changing anymore
+        for (PSchema schema : schemas.values()) {
+            newSchemas.put(schema.getSchemaKey(), new PSchema(schema));
+        }
+        return newSchemas;
+    }
+
+    private static Map<PTableKey,PFunction> cloneFunctionsMap(Map<PTableKey,PFunction> functions, int expectedCapacity) {
+        Map<PTableKey,PFunction> newFunctions = newFunctionMap(Math.max(functions.size(),expectedCapacity));
+        for (PFunction functionAccess : functions.values()) {
+            newFunctions.put(functionAccess.getKey(), new PFunction(functionAccess));
+        }
+        return newFunctions;
+    }
+
+    PMetaDataCache(PMetaDataCache toClone) {
+        this.tableRefFactory = toClone.tableRefFactory;
+        this.timeKeeper = toClone.timeKeeper;
+        this.maxByteSize = toClone.maxByteSize;
+        this.currentByteSize = toClone.currentByteSize;
+        this.expectedCapacity = toClone.expectedCapacity;
+        this.tables = cloneMap(toClone.tables, expectedCapacity);
+        this.functions = cloneFunctionsMap(toClone.functions, expectedCapacity);
+        this.schemas = cloneSchemaMap(toClone.schemas, expectedCapacity);
+    }
+    
+    public PMetaDataCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper, PTableRefFactory tableRefFactory) {
+        this.currentByteSize = 0;
+        this.maxByteSize = maxByteSize;
+        this.expectedCapacity = initialCapacity;
+        this.tables = newMap(this.expectedCapacity);
+        this.functions = newFunctionMap(this.expectedCapacity);
+        this.timeKeeper = timeKeeper;
+        this.schemas = newSchemaMap(this.expectedCapacity);
+        this.tableRefFactory = tableRefFactory;
+    }
+    
+    public PTableRef get(PTableKey key) {
+        PTableRef tableAccess = this.tables.get(key);
+        if (tableAccess == null) {
+            return null;
+        }
+        tableAccess.setLastAccessTime(timeKeeper.getCurrentTime());
+        return tableAccess;
+    }
+    
+    @Override
+    public PMetaDataCache clone() {
+        return new PMetaDataCache(this);
+    }
+    
+    /**
+     * Used when the cache has grown past its max size. Clones the cache in a single
+     * pass, removing the least recently used tables until the cache size is below its
+     * max size by at least the overage amount.
+     */
+    public PMetaDataCache cloneMinusOverage(long overage) {
+        assert(overage > 0);
+        int nToRemove = Math.max(MIN_REMOVAL_SIZE, (int)Math.ceil((currentByteSize-maxByteSize) / ((double)currentByteSize / size())) + 1);
+        MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
+        PMetaDataCache newCache = new PMetaDataCache(this.size(), this.maxByteSize, this.timeKeeper, this.tableRefFactory);
+        
+        long toRemoveBytes = 0;
+        // Add to new cache, but track references to remove when done
+        // to bring the cache at least the overage amount below its max size.
+        for (PTableRef tableRef : this.tables.values()) {
+            newCache.put(tableRef.getTable().getKey(), tableRefFactory.makePTableRef(tableRef));
+            toRemove.add(tableRef);
+            toRemoveBytes += tableRef.getEstimatedSize();
+            while (toRemoveBytes - toRemove.peekLast().getEstimatedSize() >= overage) {
+                PTableRef removedRef = toRemove.removeLast();
+                toRemoveBytes -= removedRef.getEstimatedSize();
+            }
+        }
+        for (PTableRef toRemoveRef : toRemove) {
+            newCache.remove(toRemoveRef.getTable().getKey());
+        }
+        return newCache;
+    }
+
+    PTable put(PTableKey key, PTableRef ref) {
+        currentByteSize += ref.getEstimatedSize();
+        PTableRef oldTableAccess = this.tables.put(key, ref);
+        PTable oldTable = null;
+        if (oldTableAccess != null) {
+            currentByteSize -= oldTableAccess.getEstimatedSize();
+            oldTable = oldTableAccess.getTable();
+        }
+        return oldTable;
+    }
+
+    public long getAge(PTableRef ref) {
+        return timeKeeper.getCurrentTime() - ref.getCreateTime();
+    }
+    
+    public PTable remove(PTableKey key) {
+        PTableRef value = this.tables.remove(key);
+        if (value == null) {
+            return null;
+        }
+        currentByteSize -= value.getEstimatedSize();
+        return value.getTable();
+    }
+    
+    public Iterator<PTable> iterator() {
+        final Iterator<PTableRef> iterator = this.tables.values().iterator();
+        return new Iterator<PTable>() {
+
+            @Override
+            public boolean hasNext() {
+                return iterator.hasNext();
+            }
+
+            @Override
+            public PTable next() {
+                return iterator.next().getTable();
+            }
+
+            @Override
+            public void remove() {
+                throw new UnsupportedOperationException();
+            }
+            
+        };
+    }
+
+    public int size() {
+        return this.tables.size();
+    }
+
+    public long getCurrentSize() {
+        return this.currentByteSize;
+    }
+
+    public long getMaxSize() {
+        return this.maxByteSize;
+    }
+}
\ No newline at end of file

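cloneMinusOverage() above streams every table once while a MinMaxPriorityQueue holds the current set of least recently used eviction candidates, shrinking that set whenever it over-covers the overage. A self-contained sketch of the same idea over made-up entries (Entry stands in for PTableRef; Guava is assumed on the classpath, as it is for Phoenix):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    import com.google.common.collect.MinMaxPriorityQueue;

    public class OverageEvictionSketch {
        static class Entry {
            final String name; final long lastAccess; final long size;
            Entry(String name, long lastAccess, long size) {
                this.name = name; this.lastAccess = lastAccess; this.size = size;
            }
        }

        public static void main(String[] args) {
            List<Entry> entries = Arrays.asList(
                    new Entry("A", 10, 100), new Entry("B", 50, 300),
                    new Entry("C", 20, 200), new Entry("D", 40, 150));
            long overage = 250; // bytes the cache must shed

            // Oldest access time sorts first; the queue tracks eviction candidates.
            MinMaxPriorityQueue<Entry> toRemove = MinMaxPriorityQueue
                    .orderedBy(new Comparator<Entry>() {
                        @Override public int compare(Entry a, Entry b) {
                            return Long.compare(a.lastAccess, b.lastAccess);
                        }
                    }).create();
            long toRemoveBytes = 0;
            for (Entry e : entries) {
                toRemove.add(e);
                toRemoveBytes += e.size;
                // Give back the most recently used candidate while the remaining
                // candidates still cover the overage.
                while (toRemoveBytes - toRemove.peekLast().size >= overage) {
                    toRemoveBytes -= toRemove.removeLast().size;
                }
            }
            for (Entry e : toRemove) {
                System.out.println("evict " + e.name); // A and C, the LRU entries
            }
        }
    }
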
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 5ffacca..7a78006 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -18,253 +18,50 @@
 package org.apache.phoenix.schema;
 
 import java.sql.SQLException;
-import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TimeKeeper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.MinMaxPriorityQueue;
-import com.google.common.primitives.Longs;
 
 /**
- * 
- * Client-side cache of MetaData. Not thread safe, but meant to be used
- * in a copy-on-write fashion. Internally uses a LinkedHashMap that evicts
- * the oldest entries when size grows beyond the maxSize specified at
- * create time.
- *
+ * Client-side cache of MetaData, not thread safe. Tracks access times itself and evicts
+ * the least recently used entries when size grows beyond the maxSize specified at create time.
  */
 public class PMetaDataImpl implements PMetaData {
-    private static final Logger logger = LoggerFactory.getLogger(PMetaDataImpl.class);
-        static class PMetaDataCache implements Cloneable {
-            private static final int MIN_REMOVAL_SIZE = 3;
-            private static final Comparator<PTableRef> COMPARATOR = new Comparator<PTableRef>() {
-                @Override
-                public int compare(PTableRef tableRef1, PTableRef tableRef2) {
-                    return Longs.compare(tableRef1.getLastAccessTime(), tableRef2.getLastAccessTime());
-                }
-            };
-            private static final MinMaxPriorityQueue.Builder<PTableRef> BUILDER = MinMaxPriorityQueue.orderedBy(COMPARATOR);
-            
-            private long currentByteSize;
-            private final long maxByteSize;
-            private final int expectedCapacity;
-            private final TimeKeeper timeKeeper;
-
-            private final Map<PTableKey,PTableRef> tables;
-            private final Map<PTableKey,PFunction> functions;
-            private final Map<PTableKey,PSchema> schemas;
-            
-            private static Map<PTableKey,PTableRef> newMap(int expectedCapacity) {
-                // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
-                // safely across multiple threads (as the underlying collection is not thread safe).
-                // Instead, we track access time and prune it based on the copy we've made.
-                return Maps.newHashMapWithExpectedSize(expectedCapacity);
-            }
-
-            private static Map<PTableKey,PFunction> newFunctionMap(int expectedCapacity) {
-                // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
-                // safely across multiple threads (as the underlying collection is not thread safe).
-                // Instead, we track access time and prune it based on the copy we've made.
-                return Maps.newHashMapWithExpectedSize(expectedCapacity);
-            }
-
-            private static Map<PTableKey,PSchema> newSchemaMap(int expectedCapacity) {
-                // Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
-                // safely across multiple threads (as the underlying collection is not thread safe).
-                // Instead, we track access time and prune it based on the copy we've made.
-                return Maps.newHashMapWithExpectedSize(expectedCapacity);
-            }
-
-            private static Map<PTableKey,PTableRef> cloneMap(Map<PTableKey,PTableRef> tables, int expectedCapacity) {
-                Map<PTableKey,PTableRef> newTables = newMap(Math.max(tables.size(),expectedCapacity));
-                // Copy value so that access time isn't changing anymore
-                for (PTableRef tableAccess : tables.values()) {
-                    newTables.put(tableAccess.getTable().getKey(), new PTableRef(tableAccess));
-                }
-                return newTables;
-            }
-
-            private static Map<PTableKey, PSchema> cloneSchemaMap(Map<PTableKey, PSchema> schemas, int expectedCapacity) {
-                Map<PTableKey, PSchema> newSchemas = newSchemaMap(Math.max(schemas.size(), expectedCapacity));
-                // Copy value so that access time isn't changing anymore
-                for (PSchema schema : schemas.values()) {
-                    newSchemas.put(schema.getSchemaKey(), new PSchema(schema));
-                }
-                return newSchemas;
-            }
-
-            private static Map<PTableKey,PFunction> cloneFunctionsMap(Map<PTableKey,PFunction> functions, int expectedCapacity) {
-                Map<PTableKey,PFunction> newFunctions = newFunctionMap(Math.max(functions.size(),expectedCapacity));
-                for (PFunction functionAccess : functions.values()) {
-                    newFunctions.put(functionAccess.getKey(), new PFunction(functionAccess));
-                }
-                return newFunctions;
-            }
-
-            private PMetaDataCache(PMetaDataCache toClone) {
-                this.timeKeeper = toClone.timeKeeper;
-                this.maxByteSize = toClone.maxByteSize;
-                this.currentByteSize = toClone.currentByteSize;
-                this.expectedCapacity = toClone.expectedCapacity;
-                this.tables = cloneMap(toClone.tables, expectedCapacity);
-                this.functions = cloneFunctionsMap(toClone.functions, expectedCapacity);
-                this.schemas = cloneSchemaMap(toClone.schemas, expectedCapacity);
-            }
-            
-            public PMetaDataCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
-                this.currentByteSize = 0;
-                this.maxByteSize = maxByteSize;
-                this.expectedCapacity = initialCapacity;
-                this.tables = newMap(this.expectedCapacity);
-                this.functions = newFunctionMap(this.expectedCapacity);
-                this.timeKeeper = timeKeeper;
-                this.schemas = newSchemaMap(this.expectedCapacity);
-            }
-            
-            public PTableRef get(PTableKey key) {
-                PTableRef tableAccess = this.tables.get(key);
-                if (tableAccess == null) {
-                    return null;
-                }
-                tableAccess.setLastAccessTime(timeKeeper.getCurrentTime());
-                return tableAccess;
-            }
-            
-            @Override
-            public PMetaDataCache clone() {
-                return new PMetaDataCache(this);
-            }
-
-            /**
-             * Used when the cache is growing past its max size to clone in a single pass.
-             * Removes least recently used tables to get size of cache below its max size by
-             * the overage amount.
-             */
-            public PMetaDataCache cloneMinusOverage(long overage) {
-                assert(overage > 0);
-                int nToRemove = Math.max(MIN_REMOVAL_SIZE, (int)Math.ceil((currentByteSize-maxByteSize) / ((double)currentByteSize / size())) + 1);
-                MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
-                PMetaDataCache newCache = new PMetaDataCache(this.size(), this.maxByteSize, this.timeKeeper);
-                
-                long toRemoveBytes = 0;
-                // Add to new cache, but track references to remove when done
-                // to bring cache at least overage amount below it's max size.
-                for (PTableRef tableRef : this.tables.values()) {
-                    newCache.put(tableRef.getTable().getKey(), new PTableRef(tableRef));
-                    toRemove.add(tableRef);
-                    toRemoveBytes += tableRef.getEstSize();
-                    while (toRemoveBytes - toRemove.peekLast().getEstSize() >= overage) {
-                        PTableRef removedRef = toRemove.removeLast();
-                        toRemoveBytes -= removedRef.getEstSize();
-                    }
-                }
-                for (PTableRef toRemoveRef : toRemove) {
-                    newCache.remove(toRemoveRef.getTable().getKey());
-                }
-                return newCache;
-            }
-
-            private PTable put(PTableKey key, PTableRef ref) {
-                currentByteSize += ref.getEstSize();
-                PTableRef oldTableAccess = this.tables.put(key, ref);
-                PTable oldTable = null;
-                if (oldTableAccess != null) {
-                    currentByteSize -= oldTableAccess.getEstSize();
-                    oldTable = oldTableAccess.getTable();
-                }
-                return oldTable;
-            }
-
-            public PTable put(PTableKey key, PTable value, long resolvedTime) {
-                return put(key, new PTableRef(value, timeKeeper.getCurrentTime(), resolvedTime));
-            }
-            
-            public PTable putDuplicate(PTableKey key, PTable value, long resolvedTime) {
-                return put(key, new PTableRef(value, timeKeeper.getCurrentTime(), 0, resolvedTime));
-            }
-            
-            public long getAge(PTableRef ref) {
-                return timeKeeper.getCurrentTime() - ref.getCreateTime();
-            }
-            
-            public PTable remove(PTableKey key) {
-                PTableRef value = this.tables.remove(key);
-                if (value == null) {
-                    return null;
-                }
-                currentByteSize -= value.getEstSize();
-                return value.getTable();
-            }
-            
-            public Iterator<PTable> iterator() {
-                final Iterator<PTableRef> iterator = this.tables.values().iterator();
-                return new Iterator<PTable>() {
-
-                    @Override
-                    public boolean hasNext() {
-                        return iterator.hasNext();
-                    }
-
-                    @Override
-                    public PTable next() {
-                        return iterator.next().getTable();
-                    }
-
-                    @Override
-                    public void remove() {
-                        throw new UnsupportedOperationException();
-                    }
-                    
-                };
-            }
-
-            public int size() {
-                return this.tables.size();
-            }
-
-            public long getCurrentSize() {
-                return this.currentByteSize;
-            }
-
-            public long getMaxSize() {
-                return this.maxByteSize;
-            }
-        }
-            
-    private PMetaDataCache metaData;
     
-    @VisibleForTesting
-    public PMetaDataCache getMetaData() {
-        return metaData;
-    }
+    private PMetaDataCache metaData;
+    private final TimeKeeper timeKeeper;
+    private final PTableRefFactory tableRefFactory;
     
-    public PMetaDataImpl(int initialCapacity, long maxByteSize) {
-        this.metaData = new PMetaDataCache(initialCapacity, maxByteSize, TimeKeeper.SYSTEM);
+    public PMetaDataImpl(int initialCapacity, ReadOnlyProps props) {
+        this(initialCapacity, TimeKeeper.SYSTEM, props);
     }
 
-    public PMetaDataImpl(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) {
-        this.metaData = new PMetaDataCache(initialCapacity, maxByteSize, timeKeeper);
+    public PMetaDataImpl(int initialCapacity, TimeKeeper timeKeeper, ReadOnlyProps props) {
+        this(new PMetaDataCache(initialCapacity, props.getLong(
+            QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
+            QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE), timeKeeper,
+                PTableRefFactory.getFactory(props)), timeKeeper, PTableRefFactory.getFactory(props));
     }
 
-    private PMetaDataImpl(PMetaDataCache metaData) {
+    private PMetaDataImpl(PMetaDataCache metaData, TimeKeeper timeKeeper, PTableRefFactory tableRefFactory) {
+        this.timeKeeper = timeKeeper;
         this.metaData = metaData;
+        this.tableRefFactory = tableRefFactory;
     }
-    
+
     @Override
     public PMetaDataImpl clone() {
-        return new PMetaDataImpl(new PMetaDataCache(this.metaData));
+        return new PMetaDataImpl(new PMetaDataCache(this.metaData), this.timeKeeper, this.tableRefFactory);
     }
     
     @Override
@@ -292,18 +89,20 @@ public class PMetaDataImpl implements PMetaData {
 
     @Override
     public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
-    	metaData.putDuplicate(table.getKey(), table, resolvedTimestamp);
+    	metaData.put(table.getKey(), tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTimestamp));
     }
 
     @Override
     public void addTable(PTable table, long resolvedTime) throws SQLException {
+        PTableRef tableRef = tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTime);
         int netGain = 0;
         PTableKey key = table.getKey();
         PTableRef oldTableRef = metaData.get(key);
         if (oldTableRef != null) {
-            netGain -= oldTableRef.getEstSize();
+            netGain -= oldTableRef.getEstimatedSize();
         }
         PTable newParentTable = null;
+        PTableRef newParentTableRef = null;
         long parentResolvedTimestamp = resolvedTime;
         if (table.getParentName() != null) { // Upsert new index table into parent data table list
             String parentName = table.getParentName().getString();
@@ -321,25 +120,26 @@ public class PMetaDataImpl implements PMetaData {
                     }
                 }
                 newIndexes.add(table);
-                netGain -= oldParentRef.getEstSize();
+                netGain -= oldParentRef.getEstimatedSize();
                 newParentTable = PTableImpl.makePTable(oldParentRef.getTable(), table.getTimeStamp(), newIndexes);
-                netGain += newParentTable.getEstimatedSize();
+                newParentTableRef = tableRefFactory.makePTableRef(newParentTable, this.timeKeeper.getCurrentTime(), parentResolvedTimestamp);
+                netGain += newParentTableRef.getEstimatedSize();
             }
         }
         if (newParentTable == null) { // Don't count in gain if we found a parent table, as its accounted for in newParentTable
-            netGain += table.getEstimatedSize();
+            netGain += tableRef.getEstimatedSize();
         }
         long overage = metaData.getCurrentSize() + netGain - metaData.getMaxSize();
         metaData = overage <= 0 ? metaData : metaData.cloneMinusOverage(overage);
         
         if (newParentTable != null) { // Upsert new index table into parent data table list
-            metaData.put(newParentTable.getKey(), newParentTable, parentResolvedTimestamp);
-            metaData.putDuplicate(table.getKey(), table, resolvedTime);
+            metaData.put(newParentTable.getKey(), newParentTableRef);
+            metaData.put(table.getKey(), tableRef);
         } else {
-            metaData.put(table.getKey(), table, resolvedTime);
+            metaData.put(table.getKey(), tableRef);
         }
         for (PTable index : table.getIndexes()) {
-            metaData.putDuplicate(index.getKey(), index, resolvedTime);
+            metaData.put(index.getKey(), tableRefFactory.makePTableRef(index, this.timeKeeper.getCurrentTime(), resolvedTime));
         }
     }
 
@@ -401,7 +201,7 @@ public class PMetaDataImpl implements PMetaData {
                                 parentTableRef.getTable(),
                                 tableTimeStamp == HConstants.LATEST_TIMESTAMP ? parentTableRef.getTable().getTimeStamp() : tableTimeStamp,
                                 newIndexes);
-                        metaData.put(parentTable.getKey(), parentTable, parentTableRef.getResolvedTimeStamp());
+                        metaData.put(parentTable.getKey(), tableRefFactory.makePTableRef(parentTable, this.timeKeeper.getCurrentTime(), parentTableRef.getResolvedTimeStamp()));
                         break;
                     }
                 }
@@ -444,7 +244,7 @@ public class PMetaDataImpl implements PMetaData {
             
             table = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
         }
-        tables.put(table.getKey(), table, resolvedTime);
+        tables.put(table.getKey(), tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTime));
     }
 
     @Override
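
The accounting above now charges the cache with PTableRef.getEstimatedSize()
rather than the raw PTable estimate, since a serialized ref (see
SerializedPTableRef later in this commit) occupies a different footprint than
the live object. A minimal, self-contained sketch of the netGain/overage
bookkeeping, with hypothetical SimpleRef/SimpleCache stand-ins for
PTableRef/PMetaDataCache and insertion-order eviction standing in for the
real access-time-based eviction:

    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    final class SimpleRef {
        final String key;
        final int estimatedSize; // what this entry charges against the budget
        SimpleRef(String key, int estimatedSize) {
            this.key = key;
            this.estimatedSize = estimatedSize;
        }
    }

    final class SimpleCache {
        private final long maxSize;
        private long currentSize;
        private final Map<String, SimpleRef> entries = new LinkedHashMap<String, SimpleRef>();

        SimpleCache(long maxSize) { this.maxSize = maxSize; }

        void put(SimpleRef ref) {
            SimpleRef old = entries.remove(ref.key);
            if (old != null) {
                currentSize -= old.estimatedSize; // replacing charges only the delta
            }
            // Evict until the new entry fits, but never empty the cache entirely
            // (mirrors cloneMinusOverage() and the eviction tests further down).
            Iterator<SimpleRef> it = entries.values().iterator();
            while (currentSize + ref.estimatedSize > maxSize && entries.size() > 1) {
                currentSize -= it.next().estimatedSize;
                it.remove();
            }
            entries.put(ref.key, ref);
            currentSize += ref.estimatedSize;
        }
    }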

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 92c49f9..c485a30 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -77,7 +77,6 @@ import com.google.common.collect.Maps;
  * storing data in a single column (ColumnLayout.SINGLE) or in
  * multiple columns (ColumnLayout.MULTI).
  *
- * TODO add hashCode and equal methods to check equality of two PTableImpl objects.
  * @since 0.1
  */
 public class PTableImpl implements PTable {
@@ -1073,9 +1072,9 @@ public class PTableImpl implements PTable {
       List<PName> physicalNames = Collections.emptyList();
       if (tableType == PTableType.VIEW) {
         viewType = ViewType.fromSerializedValue(table.getViewType().toByteArray()[0]);
-        if(table.hasViewStatement()){
-          viewStatement = (String) PVarchar.INSTANCE.toObject(table.getViewStatement().toByteArray());
-        }
+      }
+      if(table.hasViewStatement()){
+        viewStatement = (String) PVarchar.INSTANCE.toObject(table.getViewStatement().toByteArray());
       }
       if (tableType == PTableType.VIEW || viewIndexId != null) {
         physicalNames = Lists.newArrayListWithExpectedSize(table.getPhysicalNamesCount());
@@ -1181,6 +1180,8 @@ public class PTableImpl implements PTable {
       builder.setTransactional(table.isTransactional());
       if(table.getType() == PTableType.VIEW){
         builder.setViewType(ByteStringer.wrap(new byte[]{table.getViewType().getSerializedValue()}));
+      }
+      if(table.getViewStatement()!=null){
         builder.setViewStatement(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getViewStatement())));
       }
       if(table.getType() == PTableType.VIEW || table.getViewIndexId() != null){
@@ -1244,4 +1245,24 @@ public class PTableImpl implements PTable {
     public boolean isAppendOnlySchema() {
         return isAppendOnlySchema;
     }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((key == null) ? 0 : key.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) return true;
+        if (obj == null) return false;
+        if (getClass() != obj.getClass()) return false;
+        PTableImpl other = (PTableImpl) obj;
+        if (key == null) {
+            if (other.key != null) return false;
+        } else if (!key.equals(other.key)) return false;
+        return true;
+    }
 }
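
The two methods added here resolve the TODO removed at the top of this diff:
PTableImpl identity is now delegated entirely to its key. A self-contained
stand-in (KeyedTable is our hypothetical name, not Phoenix API) showing what
that contract buys: two instances describing the same table compare equal and
collide in hash-based collections, regardless of when each was materialized.

    import java.util.Objects;

    final class KeyedTable {
        private final String key; // stands in for PTableKey (tenant id + table name)

        KeyedTable(String key) { this.key = key; }

        @Override
        public int hashCode() {
            return 31 + Objects.hashCode(key); // same value the added method computes
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null || getClass() != obj.getClass()) return false;
            return Objects.equals(key, ((KeyedTable) obj).key);
        }

        public static void main(String[] args) {
            KeyedTable a = new KeyedTable("TENANT1.T1");
            KeyedTable b = new KeyedTable("TENANT1.T1");
            System.out.println(a.equals(b));                  // true
            System.out.println(a.hashCode() == b.hashCode()); // true
        }
    }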

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRef.java
index c4bc510..0a601b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRef.java
@@ -17,28 +17,19 @@
  */
 package org.apache.phoenix.schema;
 
-public class PTableRef {
-    private final PTable table;
-    private final int estSize;
-    private final long createTime;
-    private final long resolvedTimeStamp;
-	private volatile long lastAccessTime;
+public abstract class PTableRef {
     
-    public PTableRef(PTable table, long lastAccessTime, int estSize, long resolvedTime) {
-        this.table = table;
+    protected final int estSize;
+    protected final long createTime;
+    protected final long resolvedTimeStamp;
+    protected volatile long lastAccessTime;
+    
+    public PTableRef(long lastAccessTime, long resolvedTime, int estimatedSize) {
         this.lastAccessTime = lastAccessTime;
-        this.estSize = estSize;
+        this.estSize = estimatedSize;
         this.resolvedTimeStamp = resolvedTime;
         this.createTime = lastAccessTime;
     }
-
-    public PTableRef(PTable table, long lastAccessTime, long resolvedTime) {
-        this (table, lastAccessTime, table.getEstimatedSize(), resolvedTime);
-    }
-
-    public PTableRef(PTableRef tableRef) {
-        this (tableRef.table, tableRef.lastAccessTime, tableRef.estSize, tableRef.resolvedTimeStamp);
-    }
     
     /**
      * Tracks how long this entry has been in the cache
@@ -48,23 +39,22 @@ public class PTableRef {
         return createTime;
     }
     
-    public PTable getTable() {
-		return table;
-	}
+    public abstract PTable getTable();
 
-	public long getResolvedTimeStamp() {
-		return resolvedTimeStamp;
-	}
-	
-    public int getEstSize() {
-		return estSize;
-	}
+    public long getResolvedTimeStamp() {
+        return resolvedTimeStamp;
+    }
+    
+    public int getEstimatedSize() {
+        return estSize;
+    }
 
-	public long getLastAccessTime() {
-		return lastAccessTime;
-	}
+    public long getLastAccessTime() {
+        return lastAccessTime;
+    }
 
-	public void setLastAccessTime(long lastAccessTime) {
-		this.lastAccessTime = lastAccessTime;
-	}
+    public void setLastAccessTime(long lastAccessTime) {
+        this.lastAccessTime = lastAccessTime;
+    }
+	
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java
new file mode 100644
index 0000000..14eb235
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.ReadOnlyProps;
+
+public class PTableRefFactory {
+    public PTableRef makePTableRef(PTable table, long lastAccessTime, long resolvedTime) {
+        return new PTableRefImpl(table, lastAccessTime, resolvedTime, table.getEstimatedSize());
+    }
+
+    public PTableRef makePTableRef(PTableRef tableRef) {
+        return new PTableRefImpl(tableRef);
+    }
+
+    private static final PTableRefFactory INSTANCE = new PTableRefFactory();
+
+    public static enum Encoding {
+        OBJECT, PROTOBUF
+    };
+
+    public static PTableRefFactory getFactory(ReadOnlyProps props) {
+        String encodingEnumString =
+                props.get(QueryServices.CLIENT_CACHE_ENCODING,
+                    QueryServicesOptions.DEFAULT_CLIENT_CACHE_ENCODING);
+        Encoding encoding = Encoding.valueOf(encodingEnumString.toUpperCase());
+        switch (encoding) {
+        case PROTOBUF:
+            return SerializedPTableRefFactory.getFactory();
+        case OBJECT:
+        default:
+            return INSTANCE;
+        }
+    }
+}
\ No newline at end of file
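
A sketch of how a caller selects an encoding, assuming only what the diffs in
this commit show: ReadOnlyProps wraps a plain Map (as the updated
PMetaDataImplTest below does), and myTable stands for any existing PTable.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.schema.PTableRef;
    import org.apache.phoenix.schema.PTableRefFactory;
    import org.apache.phoenix.util.ReadOnlyProps;

    public class EncodingSelectionSketch {
        static PTableRef mint(PTable myTable, long now, long resolvedTs) {
            Map<String, String> props = new HashMap<String, String>();
            // "protobuf" routes to SerializedPTableRefFactory; "object" takes
            // the default branch and keeps live PTableRefImpl instances.
            // Matching is case-insensitive because of the toUpperCase() above.
            props.put(QueryServices.CLIENT_CACHE_ENCODING, "protobuf");
            PTableRefFactory factory = PTableRefFactory.getFactory(new ReadOnlyProps(props));
            return factory.makePTableRef(myTable, now, resolvedTs);
        }
    }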

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java
new file mode 100644
index 0000000..ffc5c2b
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+
+public class PTableRefImpl extends PTableRef {
+    
+    private final PTable table;
+    
+    public PTableRefImpl(PTable table, long lastAccessTime, long resolvedTime, int estimatedSize) {
+        super(lastAccessTime, resolvedTime, estimatedSize);
+        this.table = table;
+    }
+
+    public PTableRefImpl(PTableRef tableRef) {
+        super(tableRef.getLastAccessTime(), tableRef.getResolvedTimeStamp(), tableRef.getEstimatedSize());
+        this.table = tableRef.getTable();
+    }
+
+    @Override
+    public PTable getTable() {
+        return table;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java
new file mode 100644
index 0000000..a57fc72
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.io.IOException;
+
+import org.apache.phoenix.coprocessor.generated.PTableProtos;
+
+public class SerializedPTableRef extends PTableRef {
+
+    private final byte[] tableBytes;
+
+    public SerializedPTableRef(byte[] tableBytes, long lastAccessTime, long resolvedTime, int estimatedSize) {
+        super(lastAccessTime, resolvedTime, tableBytes.length);
+        this.tableBytes = tableBytes;
+    }
+
+    public SerializedPTableRef(PTableRef tableRef) {
+        super(tableRef.getLastAccessTime(), tableRef.getResolvedTimeStamp(), tableRef.getEstimatedSize());
+        this.tableBytes = ((SerializedPTableRef)tableRef).tableBytes;
+    }
+
+    @Override
+    public PTable getTable() {
+        try {
+            return PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(tableBytes));
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+}
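
getTable() here pays a protobuf parse on every access in exchange for keeping
only a byte[] on the heap. Note also that the constructor charges
tableBytes.length to the cache and ignores the estimatedSize argument it is
passed. A sketch of the round trip, using the same calls as
SerializedPTableRefFactory below ("table" stands for any existing PTable):

    import java.io.IOException;

    import org.apache.phoenix.coprocessor.generated.PTableProtos;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.schema.PTableImpl;

    public class SerializedRefRoundTripSketch {
        static void roundTrip(PTable table) throws IOException {
            // Serialization cost is paid once, when the ref is created.
            byte[] bytes = PTableImpl.toProto(table).toByteArray();
            // Every getTable() re-parses, so callers get distinct objects...
            PTable first = PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(bytes));
            PTable second = PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(bytes));
            assert first != second;
            // ...which PTableImpl's new key-based equals() still treats as equal.
            assert first.equals(second);
        }
    }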

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java
new file mode 100644
index 0000000..5da1fd6
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+class SerializedPTableRefFactory extends PTableRefFactory {
+    @Override
+    public PTableRef makePTableRef(PTable table, long lastAccessTime, long resolvedTime) {
+        byte[] serializedBytes = PTableImpl.toProto(table).toByteArray();
+        return new SerializedPTableRef(serializedBytes, lastAccessTime, resolvedTime, table.getEstimatedSize());
+    }
+    
+    @Override
+    public PTableRef makePTableRef(PTableRef tableRef) {
+        return new SerializedPTableRef(tableRef);
+    }
+    
+    private static final SerializedPTableRefFactory INSTANCE = new SerializedPTableRefFactory();
+    
+    public static PTableRefFactory getFactory() {
+        return INSTANCE;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b6c31ef5/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
index ef88c8c..a5660db 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java
@@ -21,12 +21,16 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
 import java.sql.SQLException;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.TimeKeeper;
 import org.junit.Test;
 
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 public class PMetaDataImplTest {
@@ -72,9 +76,11 @@ public class PMetaDataImplTest {
     
     @Test
     public void testEviction() throws Exception {
-        long maxSize = 10;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
-        PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10");
+        props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
+        PMetaData metaData = new PMetaDataImpl(5, timeKeeper,  new ReadOnlyProps(props));
         addToTable(metaData, "a", 5, timeKeeper);
         assertEquals(1, metaData.size());
         addToTable(metaData, "b", 4, timeKeeper);
@@ -116,9 +122,11 @@ public class PMetaDataImplTest {
 
     @Test
     public void shouldNotEvictMoreEntriesThanNecessary() throws Exception {
-        long maxSize = 5;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
-        PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5");
+        props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
+        PMetaData metaData = new PMetaDataImpl(5, timeKeeper,  new ReadOnlyProps(props));
         addToTable(metaData, "a", 1, timeKeeper);
         assertEquals(1, metaData.size());
         addToTable(metaData, "b", 1, timeKeeper);
@@ -136,9 +144,11 @@ public class PMetaDataImplTest {
 
     @Test
     public void shouldAlwaysKeepAtLeastOneEntryEvenIfTooLarge() throws Exception {
-        long maxSize = 5;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
-        PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5");
+        props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
+        PMetaData metaData = new PMetaDataImpl(5, timeKeeper,  new ReadOnlyProps(props));
         addToTable(metaData, "a", 1, timeKeeper);
         assertEquals(1, metaData.size());
         addToTable(metaData, "b", 1, timeKeeper);
@@ -157,9 +167,11 @@ public class PMetaDataImplTest {
 
     @Test
     public void shouldAlwaysKeepOneEntryIfMaxSizeIsZero() throws Exception {
-        long maxSize = 0;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
-        PMetaData metaData = new PMetaDataImpl(0, maxSize, timeKeeper);
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "0");
+        props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
+        PMetaData metaData = new PMetaDataImpl(5, timeKeeper,  new ReadOnlyProps(props));
         addToTable(metaData, "a", 1, timeKeeper);
         assertEquals(1, metaData.size());
         addToTable(metaData, "b", 1, timeKeeper);
@@ -178,9 +190,11 @@ public class PMetaDataImplTest {
 
     @Test
     public void testAge() throws Exception {
-        long maxSize = 10;
         TestTimeKeeper timeKeeper = new TestTimeKeeper();
-        PMetaData metaData = new PMetaDataImpl(5, maxSize, timeKeeper);
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10");
+        props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
+        PMetaData metaData = new PMetaDataImpl(5, timeKeeper,  new ReadOnlyProps(props));
         String tableName = "a";
         addToTable(metaData, tableName, 1, timeKeeper);
         PTableRef aTableRef = metaData.getTableRef(new PTableKey(null,tableName));
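
Each of the updated tests above repeats the same three lines of cache setup.
A small helper (our naming, relying on the imports this diff already adds to
the test class) would keep each test to a single call:

    private static PMetaData newMetaData(String maxCacheSizeBytes, TimeKeeper timeKeeper) {
        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
        props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, maxCacheSizeBytes);
        props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
        return new PMetaDataImpl(5, timeKeeper, new ReadOnlyProps(props));
    }

    // e.g.: PMetaData metaData = newMetaData("10", timeKeeper);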


[12/16] phoenix git commit: PHOENIX-3195 Slight safety improvement for using DistinctPrefixFilter.

Posted by td...@apache.org.
PHOENIX-3195 Slight safety improvement for using DistinctPrefixFilter.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/833cf3b3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/833cf3b3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/833cf3b3

Branch: refs/heads/4.x-HBase-1.0
Commit: 833cf3b3528eca10c8a6e4061e8ada3699873966
Parents: b8ac187
Author: Lars Hofhansl <la...@apache.org>
Authored: Mon Aug 22 13:37:02 2016 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Mon Aug 22 13:38:38 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/iterate/BaseResultIterators.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/833cf3b3/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index ceba000..8b9adfd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -227,7 +227,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
             }
 
             int cols = plan.getGroupBy().getOrderPreservingColumnCount();
-            if (cols > 0 && context.getWhereConditionColumns().size() == 0 &&
+            if (cols > 0 && keyOnlyFilter &&
                 !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) &&
                 cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() &&
                 plan.getGroupBy().isOrderPreserving() &&
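
For readability, the reshaped guard can be sketched as a standalone predicate
(our naming; keyOnlyFilter is computed earlier in BaseResultIterators and is
presumably true only when the scan's filter examines nothing but row-key
bytes, which, per the commit title, is a safer gate than checking that no
WHERE condition columns exist):

    static boolean distinctPrefixEligible(int cols, boolean keyOnlyFilter,
            boolean hasRangeScanHint, int rowKeyFieldCount,
            boolean groupByOrderPreserving, boolean remainingClauses) {
        return cols > 0
            && keyOnlyFilter // was: context.getWhereConditionColumns().size() == 0
            && !hasRangeScanHint
            && cols < rowKeyFieldCount
            && groupByOrderPreserving
            && remainingClauses; // the original condition continues past this hunk
    }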


[07/16] phoenix git commit: PHOENIX-2944 DATE Comparison Broken (Saurabh Seth)

Posted by td...@apache.org.
PHOENIX-2944 DATE Comparison Broken (Saurabh Seth)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/24e670d7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/24e670d7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/24e670d7

Branch: refs/heads/4.x-HBase-1.0
Commit: 24e670d7163e2a04f15fc48fdc2bac6887c32e0e
Parents: d3c7c9b
Author: Ankit Singhal <an...@gmail.com>
Authored: Tue Aug 16 10:29:18 2016 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Tue Aug 16 10:31:46 2016 +0530

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DateTimeIT.java  | 44 ++++++++++++++++++++
 .../org/apache/phoenix/schema/types/PDate.java  |  2 +-
 .../apache/phoenix/schema/types/PTimestamp.java |  2 +-
 3 files changed, 46 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/24e670d7/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index 461816a..7ffc54f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -732,4 +732,48 @@ public class DateTimeIT extends BaseHBaseManagedTimeIT {
         assertTrue(rs.next());
         assertEquals(new java.util.Date().getYear(),rs.getTimestamp(2).getYear());
     }
+    
+    @Test
+    public void testLiteralDateComparison() throws Exception {
+        ResultSet rs =
+                conn.createStatement().executeQuery(
+                    "select DATE '2016-05-10 00:00:00' > DATE '2016-05-11 00:00:00'");
+
+        assertTrue(rs.next());
+        assertEquals(false, rs.getBoolean(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testLiteralTimestampComparison() throws Exception {
+        ResultSet rs =
+                conn.createStatement().executeQuery(
+                    "select TIMESTAMP '2016-05-10 00:00:00' > TIMESTAMP '2016-05-11 00:00:00'");
+
+        assertTrue(rs.next());
+        assertEquals(false, rs.getBoolean(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testLiteralDateTimestampComparison() throws Exception {
+        ResultSet rs =
+                conn.createStatement().executeQuery(
+                    "select DATE '2016-05-10 00:00:00' > TIMESTAMP '2016-05-11 00:00:00'");
+
+        assertTrue(rs.next());
+        assertEquals(false, rs.getBoolean(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testLiteralDateTimestampComparison2() throws Exception {
+        ResultSet rs =
+                conn.createStatement().executeQuery(
+                    "select TIMESTAMP '2016-05-10 00:00:00' > DATE '2016-05-11 00:00:00'");
+
+        assertTrue(rs.next());
+        assertEquals(false, rs.getBoolean(1));
+        assertFalse(rs.next());
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/24e670d7/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
index b10b1ac..c27d0fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
@@ -129,7 +129,7 @@ public class PDate extends PDataType<Date> {
     if (rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) {
       return -rhsType.compareTo(rhs, lhs, PTime.INSTANCE);
     }
-    return ((java.util.Date) rhs).compareTo((java.util.Date) lhs);
+    return ((java.util.Date) lhs).compareTo((java.util.Date) rhs);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/24e670d7/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index 1f654fe..cdfb533 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -174,7 +174,7 @@ public class PTimestamp extends PDataType<Timestamp> {
         if (equalsAny(rhsType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) {
             return ((java.sql.Timestamp) lhs).compareTo((java.sql.Timestamp) rhs);
         }
-        int c = ((java.util.Date) rhs).compareTo((java.util.Date) lhs);
+        int c = ((java.util.Date) lhs).compareTo((java.util.Date) rhs);
         if (c != 0) return c;
         return ((java.sql.Timestamp) lhs).getNanos();
     }
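
The one-line fix in both PDate and PTimestamp is the operand order of
compareTo(). Because compareTo() is antisymmetric, calling it on the
right-hand operand silently negates every comparison result, which is exactly
what the new DateTimeIT tests above pin down. A minimal illustration:

    public class CompareToOrderSketch {
        public static void main(String[] args) {
            java.util.Date earlier = new java.util.Date(100L);
            java.util.Date later = new java.util.Date(200L);
            // Correct order: lhs.compareTo(rhs) is negative when lhs < rhs.
            System.out.println(earlier.compareTo(later)); // -1
            // The buggy order returned the opposite sign, so a predicate like
            // DATE '2016-05-10' > DATE '2016-05-11' evaluated to true.
            System.out.println(later.compareTo(earlier)); // 1
        }
    }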


[06/16] phoenix git commit: PHOENIX-3164 Cache UGI instances for remote users in PQS

Posted by td...@apache.org.
PHOENIX-3164 Cache UGI instances for remote users in PQS

equals(Object) and hashCode() on UGI are implemented
via reference checks rather than on the values themselves,
so two UGI instances for the same remote user never compare
equal. As a result, PQS kept opening new PhoenixConnections
for the same user instead of reusing an existing one.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d3c7c9b4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d3c7c9b4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d3c7c9b4

Branch: refs/heads/4.x-HBase-1.0
Commit: d3c7c9b48e02eef065c0c3af0c25ef3a279b8a3d
Parents: b64b08f
Author: Josh Elser <el...@apache.org>
Authored: Tue Aug 9 14:52:20 2016 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Mon Aug 15 18:42:06 2016 -0400

----------------------------------------------------------------------
 .../org/apache/phoenix/query/QueryServices.java |  3 +
 .../phoenix/query/QueryServicesOptions.java     |  4 +
 .../apache/phoenix/queryserver/server/Main.java | 61 ++++++++++++--
 .../server/PhoenixDoAsCallbackTest.java         | 89 ++++++++++++++++++++
 4 files changed, 149 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3c7c9b4/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index e945021..42f954a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -206,6 +206,9 @@ public interface QueryServices extends SQLCloseable {
     public static final String QUERY_SERVER_DNS_NAMESERVER_ATTRIB = "phoenix.queryserver.dns.nameserver";
     public static final String QUERY_SERVER_DNS_INTERFACE_ATTRIB = "phoenix.queryserver.dns.interface";
     public static final String QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB = "hbase.security.authentication";
+    public static final String QUERY_SERVER_UGI_CACHE_MAX_SIZE = "phoenix.queryserver.ugi.cache.max.size";
+    public static final String QUERY_SERVER_UGI_CACHE_INITIAL_SIZE = "phoenix.queryserver.ugi.cache.initial.size";
+    public static final String QUERY_SERVER_UGI_CACHE_CONCURRENCY = "phoenix.queryserver.ugi.cache.concurrency";
     
     public static final String RENEW_LEASE_ENABLED = "phoenix.scanner.lease.renew.enabled";
     public static final String RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS = "phoenix.scanner.lease.renew.interval";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3c7c9b4/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 9823182..70b85db 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -242,6 +242,10 @@ public class QueryServicesOptions {
     // doesn't depend on phoenix-core.
     public static final String DEFAULT_QUERY_SERVER_SERIALIZATION = "PROTOBUF";
     public static final int DEFAULT_QUERY_SERVER_HTTP_PORT = 8765;
+    public static final long DEFAULT_QUERY_SERVER_UGI_CACHE_MAX_SIZE = 1000L;
+    public static final int DEFAULT_QUERY_SERVER_UGI_CACHE_INITIAL_SIZE = 100;
+    public static final int DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY = 10;
+
     public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true;
     public static final int DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS =
             DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD / 2;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3c7c9b4/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/Main.java
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/Main.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/Main.java
index fc2ee34..4b3ca7e 100644
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/Main.java
+++ b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/Main.java
@@ -18,14 +18,15 @@
 package org.apache.phoenix.queryserver.server;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
 import org.apache.calcite.avatica.Meta;
 import org.apache.calcite.avatica.remote.Driver;
 import org.apache.calcite.avatica.remote.LocalService;
 import org.apache.calcite.avatica.remote.Service;
-import org.apache.calcite.avatica.server.AvaticaHandler;
-import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
 import org.apache.calcite.avatica.server.DoAsRemoteUserCallback;
-import org.apache.calcite.avatica.server.HandlerFactory;
 import org.apache.calcite.avatica.server.HttpServer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -43,7 +44,6 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 
 import java.io.File;
-import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.RuntimeMXBean;
 import java.security.PrivilegedExceptionAction;
@@ -54,6 +54,7 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -210,7 +211,7 @@ public final class Main extends Configured implements Tool, Runnable {
         // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
         builder.withSpnego(ugi.getUserName())
             .withAutomaticLogin(keytab)
-            .withImpersonation(new PhoenixDoAsCallback(ugi));
+            .withImpersonation(new PhoenixDoAsCallback(ugi, getConf()));
       }
 
       // Build and start the HttpServer
@@ -261,15 +262,29 @@ public final class Main extends Configured implements Tool, Runnable {
    */
   static class PhoenixDoAsCallback implements DoAsRemoteUserCallback {
     private final UserGroupInformation serverUgi;
+    private final LoadingCache<String,UserGroupInformation> ugiCache;
 
-    public PhoenixDoAsCallback(UserGroupInformation serverUgi) {
+    public PhoenixDoAsCallback(UserGroupInformation serverUgi, Configuration conf) {
       this.serverUgi = Objects.requireNonNull(serverUgi);
+      this.ugiCache = CacheBuilder.newBuilder()
+          .initialCapacity(conf.getInt(QueryServices.QUERY_SERVER_UGI_CACHE_INITIAL_SIZE,
+                  QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_INITIAL_SIZE))
+          .concurrencyLevel(conf.getInt(QueryServices.QUERY_SERVER_UGI_CACHE_CONCURRENCY,
+                  QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY))
+          .maximumSize(conf.getLong(QueryServices.QUERY_SERVER_UGI_CACHE_MAX_SIZE,
+                  QueryServicesOptions.DEFAULT_QUERY_SERVER_UGI_CACHE_MAX_SIZE))
+          .build(new UgiCacheLoader(this.serverUgi));
     }
 
     @Override
     public <T> T doAsRemoteUser(String remoteUserName, String remoteAddress, final Callable<T> action) throws Exception {
-      // Proxy this user on top of the server's user (the real user)
-      UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(remoteUserName, serverUgi);
+      // We are guaranteed by Avatica that the `remoteUserName` is properly authenticated by the
+      // time this method is called. We don't have to verify the wire credentials, we can assume the
+      // user provided valid credentials for who it claimed it was.
+
+      // Proxy this user on top of the server's user (the real user). Get a cached instance, the
+      // LoadingCache will create a new instance for us if one isn't cached.
+      UserGroupInformation proxyUser = createProxyUser(remoteUserName);
 
       // Check if this user is allowed to be impersonated.
       // Will throw AuthorizationException if the impersonation as this user is not allowed
@@ -283,6 +298,36 @@ public final class Main extends Configured implements Tool, Runnable {
         }
       });
     }
+
+      @VisibleForTesting
+      UserGroupInformation createProxyUser(String remoteUserName) throws ExecutionException {
+          // PHOENIX-3164 UGI's hashCode and equals methods rely on reference checks, not
+          // value-based checks. We need to make sure we return the same UGI instance for a remote
+          // user, otherwise downstream code in Phoenix and HBase may not treat two of the same
+          // calls from one user as equivalent.
+          return ugiCache.get(remoteUserName);
+      }
+
+      @VisibleForTesting
+      LoadingCache<String,UserGroupInformation> getCache() {
+          return ugiCache;
+      }
+  }
+
+  /**
+   * CacheLoader implementation which creates a "proxy" UGI instance for the given user name.
+   */
+  static class UgiCacheLoader extends CacheLoader<String,UserGroupInformation> {
+      private final UserGroupInformation serverUgi;
+
+      public UgiCacheLoader(UserGroupInformation serverUgi) {
+          this.serverUgi = Objects.requireNonNull(serverUgi);
+      }
+
+      @Override
+      public UserGroupInformation load(String remoteUserName) throws Exception {
+          return UserGroupInformation.createProxyUser(remoteUserName, serverUgi);
+      }
   }
 
   public static void main(String[] argv) throws Exception {
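
All three dimensions of the new UGI cache are tunable. A sketch of setting
them programmatically on the Configuration handed to PhoenixDoAsCallback (in
a real deployment they would normally come from hbase-site.xml); the values
shown are the defaults added to QueryServicesOptions above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.query.QueryServices;

    public class UgiCacheTuningSketch {
        static Configuration tunedConf() {
            Configuration conf = new Configuration();
            conf.setInt(QueryServices.QUERY_SERVER_UGI_CACHE_INITIAL_SIZE, 100);
            conf.setInt(QueryServices.QUERY_SERVER_UGI_CACHE_CONCURRENCY, 10);
            conf.setLong(QueryServices.QUERY_SERVER_UGI_CACHE_MAX_SIZE, 1000L);
            return conf;
        }
    }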

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3c7c9b4/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
new file mode 100644
index 0000000..000baec
--- /dev/null
+++ b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/PhoenixDoAsCallbackTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.queryserver.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.phoenix.queryserver.server.Main.PhoenixDoAsCallback;
+import org.junit.Test;
+
+/**
+ * Tests for the authorization callback hook Avatica provides for Phoenix to implement.
+ */
+public class PhoenixDoAsCallbackTest {
+
+    @Test
+    public void ugiInstancesAreCached() throws Exception {
+        Configuration conf = new Configuration(false);
+        UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
+        PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);
+
+        UserGroupInformation ugi1 = callback.createProxyUser("user1");
+        assertEquals(1, callback.getCache().size());
+        assertTrue(ugi1.getRealUser() == serverUgi);
+        UserGroupInformation ugi2 = callback.createProxyUser("user2");
+        assertEquals(2, callback.getCache().size());
+        assertTrue(ugi2.getRealUser() == serverUgi);
+
+        UserGroupInformation ugi1Reference = callback.createProxyUser("user1");
+        assertTrue(ugi1 == ugi1Reference);
+        assertEquals(2, callback.getCache().size());
+    }
+
+    @Test
+    public void proxyingUsersAreCached() throws Exception {
+      Configuration conf = new Configuration(false);
+      // The user "server" can impersonate anyone
+      conf.set("hadoop.proxyuser.server.groups", "*");
+      conf.set("hadoop.proxyuser.server.hosts", "*");
+      // Trigger ProxyUsers to refresh itself with the above configuration
+      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
+      PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);
+
+      UserGroupInformation user1 = callback.doAsRemoteUser("user1", "localhost:1234", new Callable<UserGroupInformation>() {
+          public UserGroupInformation call() throws Exception {
+            return UserGroupInformation.getCurrentUser();
+          }
+      });
+
+      UserGroupInformation user2 = callback.doAsRemoteUser("user2", "localhost:1235", new Callable<UserGroupInformation>() {
+          public UserGroupInformation call() throws Exception {
+            return UserGroupInformation.getCurrentUser();
+          }
+      });
+
+      UserGroupInformation user1Reference = callback.doAsRemoteUser("user1", "localhost:1234", new Callable<UserGroupInformation>() {
+          public UserGroupInformation call() throws Exception {
+            return UserGroupInformation.getCurrentUser();
+          }
+      });
+
+      // The UserGroupInformation.getCurrentUser() actually returns a new UGI instance, but the internal
+      // subject is the same. We can verify things will work as expected that way.
+      assertNotEquals(user1.hashCode(), user2.hashCode());
+      assertEquals("These should be the same (cached) instance", user1.hashCode(), user1Reference.hashCode());
+      assertEquals("These should be the same (cached) instance", user1, user1Reference);
+    }
+}


[09/16] phoenix git commit: PHOENIX-2995 Write performance severely degrades with large number of views

Posted by td...@apache.org.
PHOENIX-2995 Write performance severely degrades with large number of views


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c22020a4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c22020a4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c22020a4

Branch: refs/heads/4.x-HBase-1.0
Commit: c22020a479ac39208616d647fbae339cb3fdb1b4
Parents: 24e670d
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Fri Jul 22 14:24:38 2016 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Wed Aug 17 11:14:04 2016 -0700

----------------------------------------------------------------------
 .../apache/phoenix/end2end/UpsertSelectIT.java  |   2 +-
 .../phoenix/compile/CreateTableCompiler.java    |   7 +-
 .../apache/phoenix/execute/MutationState.java   | 328 ++++++++++++-------
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  57 ++--
 .../query/ConnectionQueryServicesImpl.java      |  93 +++---
 .../query/ConnectionlessQueryServicesImpl.java  |  47 ++-
 .../query/DelegateConnectionQueryServices.java  |  38 +--
 .../apache/phoenix/query/MetaDataMutated.java   |  19 +-
 .../org/apache/phoenix/schema/PMetaData.java    |   4 +-
 .../apache/phoenix/schema/PMetaDataImpl.java    | 107 +++---
 .../phoenix/schema/PSynchronizedMetaData.java   | 249 ++++++++++++++
 .../apache/phoenix/util/TransactionUtil.java    |   4 +-
 .../phoenix/schema/PMetaDataImplTest.java       |  68 ++--
 13 files changed, 652 insertions(+), 371 deletions(-)
----------------------------------------------------------------------
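
The heart of the write-path change (full hunks below) is that MutationState
now groups row mutations into a Map<TableInfo, List<Mutation>> keyed by
physical HBase table plus a data-vs-index flag, so mutations from many views
that share one physical table can be sent together instead of per view. A
self-contained sketch of the grouping idiom, with a composite String key
standing in for the new TableInfo class:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class GroupByPhysicalTableSketch {
        public static void main(String[] args) {
            Map<String, List<String>> physicalTableMutationMap =
                    new LinkedHashMap<String, List<String>>();
            // {physical table, data|index, mutation}: three logical writes,
            // but only two physical destinations.
            String[][] mutations = {
                { "MY_PHYSICAL_TABLE", "data", "upsert-row-1" },
                { "MY_PHYSICAL_TABLE", "data", "upsert-row-2" },
                { "MY_PHYSICAL_TABLE_IDX", "index", "upsert-idx-1" },
            };
            for (String[] m : mutations) {
                String key = m[0] + "|" + m[1]; // TableInfo equality in miniature
                List<String> batch = physicalTableMutationMap.get(key);
                if (batch == null) {
                    batch = new ArrayList<String>();
                    physicalTableMutationMap.put(key, batch);
                }
                batch.add(m[2]);
            }
            // Two batches are sent, not three.
            System.out.println(physicalTableMutationMap);
        }
    }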


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 30de4de..4d811a4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -1022,7 +1022,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
             // Upsert data with scn set on the connection. The timestamp of the put will be the value of the row_timestamp column.
             long rowTimestamp1 = 100;
             Date rowTimestampDate = new Date(rowTimestamp1);
-            try (Connection conn = getConnection(ts)) {
+            try (Connection conn = getConnection(ts+1)) {
                 PreparedStatement stmt = conn.prepareStatement("UPSERT INTO  " + tableName + " (PK1, PK2, PK3, KV1) VALUES(?, ?, ?, ?)");
                 stmt.setInt(1, 1);
                 stmt.setDate(2, rowTimestampDate);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
index b545156..3928f66 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
@@ -54,7 +54,6 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PDatum;
-import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.PTableType;
@@ -143,11 +142,11 @@ public class CreateTableCompiler {
                             // on our connection.
                             new DelegateConnectionQueryServices(connection.getQueryServices()) {
                                 @Override
-                                public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
-                                    return connection.addTable(table, resolvedTime);
+                                public void addTable(PTable table, long resolvedTime) throws SQLException {
+                                    connection.addTable(table, resolvedTime);
                                 }
                             },
-                            connection, tableRef.getTimeStamp());
+                            connection, tableRef.getTimeStamp()+1);
                     viewColumnConstantsToBe = new byte[nColumns][];
                     ViewWhereExpressionVisitor visitor = new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe);
                     where.accept(visitor);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index d44b679..38d24aa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -30,6 +30,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -70,6 +71,7 @@ import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PMetaData;
+import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PRow;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
@@ -359,7 +361,7 @@ public class MutationState implements SQLCloseable {
         HTableInterface htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
         Transaction currentTx;
         if (table.isTransactional() && (currentTx=getTransaction()) != null) {
-            TransactionAwareHTable txAware = TransactionUtil.getTransactionAwareHTable(htable, table);
+            TransactionAwareHTable txAware = TransactionUtil.getTransactionAwareHTable(htable, table.isImmutableRows());
             // Using cloned mutationState as we may have started a new transaction already
             // if auto commit is true and we need to use the original one here.
             txAware.startTx(currentTx);
@@ -553,7 +555,7 @@ public class MutationState implements SQLCloseable {
         return ptr;
     }
     
-    private Iterator<Pair<byte[],List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values,
+    private Iterator<Pair<PName,List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values,
             final long timestamp, boolean includeAllIndexes, final boolean sendAll) { 
         final PTable table = tableRef.getTable();
         final Iterator<PTable> indexes = // Only maintain tables with immutable rows through this client-side mechanism
@@ -565,7 +567,7 @@ public class MutationState implements SQLCloseable {
         final List<Mutation> mutationList = Lists.newArrayListWithExpectedSize(values.size());
         final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
         generateMutations(tableRef, timestamp, values, mutationList, mutationsPertainingToIndex);
-        return new Iterator<Pair<byte[],List<Mutation>>>() {
+        return new Iterator<Pair<PName,List<Mutation>>>() {
             boolean isFirst = true;
 
             @Override
@@ -574,10 +576,10 @@ public class MutationState implements SQLCloseable {
             }
 
             @Override
-            public Pair<byte[], List<Mutation>> next() {
+            public Pair<PName, List<Mutation>> next() {
                 if (isFirst) {
                     isFirst = false;
-                    return new Pair<byte[],List<Mutation>>(table.getPhysicalName().getBytes(), mutationList);
+                    return new Pair<PName,List<Mutation>>(table.getPhysicalName(), mutationList);
                 }
                 PTable index = indexes.next();
                 List<Mutation> indexMutations;
@@ -598,7 +600,7 @@ public class MutationState implements SQLCloseable {
                 } catch (SQLException e) {
                     throw new IllegalDataException(e);
                 }
-                return new Pair<byte[],List<Mutation>>(index.getPhysicalName().getBytes(),indexMutations);
+                return new Pair<PName,List<Mutation>>(index.getPhysicalName(),indexMutations);
             }
 
             @Override
@@ -685,7 +687,24 @@ public class MutationState implements SQLCloseable {
             private Iterator<Pair<byte[],List<Mutation>>> innerIterator = init();
                     
             private Iterator<Pair<byte[],List<Mutation>>> init() {
-                return addRowMutations(current.getKey(), current.getValue(), timestamp, includeMutableIndexes, true);
+                final Iterator<Pair<PName, List<Mutation>>> mutationIterator = addRowMutations(current.getKey(), current.getValue(), timestamp, includeMutableIndexes, true);
+                return new Iterator<Pair<byte[],List<Mutation>>>() {
+                    @Override
+                    public boolean hasNext() {
+                        return mutationIterator.hasNext();
+                    }
+
+                    @Override
+                    public Pair<byte[], List<Mutation>> next() {
+                        Pair<PName, List<Mutation>> pair = mutationIterator.next();
+                        return new Pair<byte[], List<Mutation>>(pair.getFirst().getBytes(), pair.getSecond());
+                    }
+                    
+                    @Override
+                    public void remove() {
+                        mutationIterator.remove();
+                    }
+                };
             }
             
             @Override
@@ -870,6 +889,55 @@ public class MutationState implements SQLCloseable {
         }
     }
     
+    private static class TableInfo {
+        
+        private final boolean isDataTable;
+        @Nonnull private final PName hTableName;
+        @Nonnull private final TableRef origTableRef;
+        
+        public TableInfo(boolean isDataTable, PName hTableName, TableRef origTableRef) {
+            super();
+            checkNotNull(hTableName);
+            checkNotNull(origTableRef);
+            this.isDataTable = isDataTable;
+            this.hTableName = hTableName;
+            this.origTableRef = origTableRef;
+        }
+
+        public boolean isDataTable() {
+            return isDataTable;
+        }
+
+        public PName getHTableName() {
+            return hTableName;
+        }
+
+        public TableRef getOrigTableRef() {
+            return origTableRef;
+        }
+
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + hTableName.hashCode();
+            result = prime * result + (isDataTable ? 1231 : 1237);
+            return result;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) return true;
+            if (obj == null) return false;
+            if (getClass() != obj.getClass()) return false;
+            TableInfo other = (TableInfo) obj;
+            if (!hTableName.equals(other.hTableName)) return false;
+            if (isDataTable != other.isDataTable) return false;
+            return true;
+        }
+
+    }
+    
     @SuppressWarnings("deprecation")
     private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
         int i = 0;
@@ -883,6 +951,7 @@ public class MutationState implements SQLCloseable {
 
         Map<ImmutableBytesPtr, RowMutationState> valuesMap;
         List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
+        Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap(); 
         // add tracing for this operation
         try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
             Span span = trace.getSpan();
@@ -898,126 +967,24 @@ public class MutationState implements SQLCloseable {
                 // Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
                 long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
                 final PTable table = tableRef.getTable();
-                // Track tables to which we've sent uncommitted data
-                if (isTransactional = table.isTransactional()) {
-                    txTableRefs.add(tableRef);
-                    addDMLFence(table);
-                    uncommittedPhysicalNames.add(table.getPhysicalName().getString());
-                }
+                Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
+                // build map from physical table to mutation list
                 boolean isDataTable = true;
-                table.getIndexMaintainers(indexMetaDataPtr, connection);
-                Iterator<Pair<byte[],List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
                 while (mutationsIterator.hasNext()) {
-                    Pair<byte[],List<Mutation>> pair = mutationsIterator.next();
-                    byte[] htableName = pair.getFirst();
+                    Pair<PName,List<Mutation>> pair = mutationsIterator.next();
+                    PName hTableName = pair.getFirst();
                     List<Mutation> mutationList = pair.getSecond();
-                    
-                    //create a span per target table
-                    //TODO maybe we can be smarter about the table name to string here?
-                    Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
-    
-                    int retryCount = 0;
-                    boolean shouldRetry = false;
-                    do {
-                        final ServerCache cache = isDataTable ? setMetaDataOnMutations(tableRef, mutationList, indexMetaDataPtr) : null;
-                    
-                        // If we haven't retried yet, retry for this case only, as it's possible that
-                        // a split will occur after we send the index metadata cache to all known
-                        // region servers.
-                        shouldRetry = cache != null;
-                        SQLException sqlE = null;
-                        HTableInterface hTable = connection.getQueryServices().getTable(htableName);
-                        try {
-                            if (isTransactional) {
-                                // If we have indexes, wrap the HTable in a delegate HTable that
-                                // will attach the necessary index meta data in the event of a
-                                // rollback
-                                if (!table.getIndexes().isEmpty()) {
-                                    hTable = new MetaDataAwareHTable(hTable, tableRef);
-                                }
-                                TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table);
-                                // Don't add immutable indexes (those are the only ones that would participate
-                                // during a commit), as we don't need conflict detection for these.
-                                if (isDataTable) {
-                                    // Even for immutable, we need to do this so that an abort has the state
-                                    // necessary to generate the rows to delete.
-                                    addTransactionParticipant(txnAware);
-                                } else {
-                                    txnAware.startTx(getTransaction());
-                                }
-                                hTable = txnAware;
-                            }
-                            long numMutations = mutationList.size();
-                            GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
-                            
-                            long startTime = System.currentTimeMillis();
-                            child.addTimelineAnnotation("Attempt " + retryCount);
-                            hTable.batch(mutationList);
-                            if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
-                            child.stop();
-                            child.stop();
-                            shouldRetry = false;
-                            long mutationCommitTime = System.currentTimeMillis() - startTime;
-                            GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
-                            
-                            long mutationSizeBytes = calculateMutationSize(mutationList);
-                            MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
-                            mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
-                        } catch (Exception e) {
-                            SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
-                            if (inferredE != null) {
-                                if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
-                                    // Swallow this exception once, as it's possible that we split after sending the index metadata
-                                    // and one of the region servers doesn't have it. This will cause it to have it the next go around.
-                                    // If it fails again, we don't retry.
-                                    String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
-                                    logger.warn(LogUtil.addCustomAnnotations(msg, connection));
-                                    connection.getQueryServices().clearTableRegionCache(htableName);
-    
-                                    // add a new child span as this one failed
-                                    child.addTimelineAnnotation(msg);
-                                    child.stop();
-                                    child = Tracing.child(span,"Failed batch, attempting retry");
-    
-                                    continue;
-                                }
-                                e = inferredE;
-                            }
-                            // Throw to client an exception that indicates the statements that
-                            // were not committed successfully.
-                            sqlE = new CommitException(e, getUncommittedStatementIndexes());
-                        } finally {
-                            try {
-                                if (cache != null) {
-                                    cache.close();
-                                }
-                            } finally {
-                                try {
-                                    hTable.close();
-                                } 
-                                catch (IOException e) {
-                                    if (sqlE != null) {
-                                        sqlE.setNextException(ServerUtil.parseServerException(e));
-                                    } else {
-                                        sqlE = ServerUtil.parseServerException(e);
-                                    }
-                                } 
-                                if (sqlE != null) {
-                                    throw sqlE;
-                                }
-                            }
-                        }
-                    } while (shouldRetry && retryCount++ < 1);
+                    TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
+                    List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
+                    if (oldMutationList != null) {
+                        mutationList.addAll(0, oldMutationList);
+                    }
                     isDataTable = false;
                 }
-                if (tableRef.getTable().getType() != PTableType.INDEX) {
-                    numRows -= valuesMap.size();
-                }
                 // For transactions, track the statement indexes as we send data
                 // over because our CommitException should include all statements
                 // involved in the transaction since none of them would have been
                 // committed in the event of a failure.
-                if (isTransactional) {
+                if (table.isTransactional()) {
                     addUncommittedStatementIndexes(valuesMap.values());
                     if (txMutations.isEmpty()) {
                         txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
@@ -1029,15 +996,122 @@ public class MutationState implements SQLCloseable {
                     // indexes have changed.
                     joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
                 }
-                // Remove batches as we process them
-                if (sendAll) {
-                    // Iterating through map key set in this case, so we cannot use
-                    // the remove method without getting a concurrent modification
-                    // exception.
-                    tableRefIterator.remove();
-                } else {
-                    mutations.remove(tableRef);
-                }
+            }
+            Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
+            while (mutationsIterator.hasNext()) {
+                Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
+                TableInfo tableInfo = pair.getKey();
+                byte[] htableName = tableInfo.getHTableName().getBytes();
+                List<Mutation> mutationList = pair.getValue();
+                
+                //create a span per target table
+                //TODO maybe we can be smarter about the table name to string here?
+                Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
+
+                int retryCount = 0;
+                boolean shouldRetry = false;
+                do {
+                    TableRef origTableRef = tableInfo.getOrigTableRef();
+                    PTable table = origTableRef.getTable();
+                    table.getIndexMaintainers(indexMetaDataPtr, connection);
+                    final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
+                    // If we haven't retried yet, retry for this case only, as it's possible that
+                    // a split will occur after we send the index metadata cache to all known
+                    // region servers.
+                    shouldRetry = cache != null;
+                    SQLException sqlE = null;
+                    HTableInterface hTable = connection.getQueryServices().getTable(htableName);
+                    try {
+                        if (table.isTransactional()) {
+                            // Track tables to which we've sent uncommitted data
+                            txTableRefs.add(origTableRef);
+                            addDMLFence(table);
+                            uncommittedPhysicalNames.add(table.getPhysicalName().getString());
+                            
+                            // If we have indexes, wrap the HTable in a delegate HTable that
+                            // will attach the necessary index meta data in the event of a
+                            // rollback
+                            if (!table.getIndexes().isEmpty()) {
+                                hTable = new MetaDataAwareHTable(hTable, origTableRef);
+                            }
+                            TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
+                            // Don't add immutable indexes (those are the only ones that would participate
+                            // during a commit), as we don't need conflict detection for these.
+                            if (tableInfo.isDataTable()) {
+                                // Even for immutable, we need to do this so that an abort has the state
+                                // necessary to generate the rows to delete.
+                                addTransactionParticipant(txnAware);
+                            } else {
+                                txnAware.startTx(getTransaction());
+                            }
+                            hTable = txnAware;
+                        }
+                        
+                        long numMutations = mutationList.size();
+                        GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
+                        
+                        long startTime = System.currentTimeMillis();
+                        child.addTimelineAnnotation("Attempt " + retryCount);
+                        hTable.batch(mutationList);
+                        if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
+                        child.stop();
+                        shouldRetry = false;
+                        long mutationCommitTime = System.currentTimeMillis() - startTime;
+                        GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
+                        
+                        long mutationSizeBytes = calculateMutationSize(mutationList);
+                        MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
+                        mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
+                        if (tableInfo.isDataTable()) {
+                            numRows -= numMutations;
+                        }
+                        // Remove batches as we process them
+                        mutations.remove(origTableRef);
+                    } catch (Exception e) {
+                        SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
+                        if (inferredE != null) {
+                            if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
+                                // Swallow this exception once, as it's possible that we split after sending the index metadata
+                                // and one of the region servers doesn't have it; the retry will resend it.
+                                // If it fails again, we don't retry.
+                                String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
+                                logger.warn(LogUtil.addCustomAnnotations(msg, connection));
+                                connection.getQueryServices().clearTableRegionCache(htableName);
+
+                                // add a new child span as this one failed
+                                child.addTimelineAnnotation(msg);
+                                child.stop();
+                                child = Tracing.child(span,"Failed batch, attempting retry");
+
+                                continue;
+                            }
+                            e = inferredE;
+                        }
+                        // Throw to client an exception that indicates the statements that
+                        // were not committed successfully.
+                        sqlE = new CommitException(e, getUncommittedStatementIndexes());
+                    } finally {
+                        try {
+                        if (cache != null) {
+                            cache.close();
+                        }
+                        } finally {
+                            try {
+                                hTable.close();
+                            } 
+                            catch (IOException e) {
+                                if (sqlE != null) {
+                                    sqlE.setNextException(ServerUtil.parseServerException(e));
+                                } else {
+                                    sqlE = ServerUtil.parseServerException(e);
+                                }
+                            } 
+                            if (sqlE != null) {
+                                throw sqlE;
+                            }
+                        }
+                    }
+                } while (shouldRetry && retryCount++ < 1);
             }
         }
     }
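
The grouping above reduces to a merge-on-put over a LinkedHashMap. A minimal,
self-contained sketch of that pattern (table names and string "mutations" are
illustrative stand-ins, not the Phoenix API):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class MergeOnPutSketch {
        // Register a batch under its table, prepending any batch already
        // mapped there so earlier mutations still commit first.
        static void add(Map<String, List<String>> byTable, String table, List<String> batch) {
            List<String> previous = byTable.put(table, batch);
            if (previous != null) {
                batch.addAll(0, previous);
            }
        }

        public static void main(String[] args) {
            Map<String, List<String>> byTable = new LinkedHashMap<String, List<String>>();
            add(byTable, "T", new ArrayList<String>(Arrays.asList("put-1")));
            add(byTable, "T", new ArrayList<String>(Arrays.asList("put-2")));
            System.out.println(byTable); // {T=[put-1, put-2]}
        }
    }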

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 4d01d08..0d09e75 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -112,8 +112,9 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.SchemaUtil;
-
 import org.apache.tephra.TransactionContext;
+import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;
@@ -280,20 +281,21 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
                 long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
                 return (table.getType() != PTableType.SYSTEM && 
                         (  table.getTimeStamp() >= maxTimestamp || 
-                         ! Objects.equal(tenantId, table.getTenantId())) );
+                         (table.getTenantId() != null && !Objects.equal(tenantId, table.getTenantId()))));
             }
             
             @Override
             public boolean prune(PFunction function) {
                 long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
                 return ( function.getTimeStamp() >= maxTimestamp ||
-                         ! Objects.equal(tenantId, function.getTenantId()));
+                        (function.getTenantId() != null && !Objects.equal(tenantId, function.getTenantId())));
             }
         };
         this.isRequestLevelMetricsEnabled = JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info, this.services.getProps());
         this.mutationState = mutationState == null ? newMutationState(maxSize) : new MutationState(mutationState);
-        this.metaData = metaData.pruneTables(pruner);
-        this.metaData = metaData.pruneFunctions(pruner);
+        this.metaData = metaData;
+        this.metaData.pruneTables(pruner);
+        this.metaData.pruneFunctions(pruner);
         this.services.addConnection(this);
 
         // setup tracing, if its enabled
@@ -900,79 +902,71 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
     }
     
     @Override
-    public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
-        metaData = metaData.addTable(table, resolvedTime);
+    public void addTable(PTable table, long resolvedTime) throws SQLException {
+        metaData.addTable(table, resolvedTime);
         //Cascade through to connectionQueryServices too
         getQueryServices().addTable(table, resolvedTime);
-        return metaData;
     }
     
     @Override
-    public PMetaData updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException {
-    	metaData = metaData.updateResolvedTimestamp(table, resolvedTime);
+    public void updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException {
+    	metaData.updateResolvedTimestamp(table, resolvedTime);
     	//Cascade through to connectionQueryServices too
         getQueryServices().updateResolvedTimestamp(table, resolvedTime);
-        return metaData;
     }
     
     @Override
-    public PMetaData addFunction(PFunction function) throws SQLException {
+    public void addFunction(PFunction function) throws SQLException {
         // TODO: since a connection is only used by one thread at a time,
         // we could modify this metadata in place since it's not shared.
         if (scn == null || scn > function.getTimeStamp()) {
-            metaData = metaData.addFunction(function);
+            metaData.addFunction(function);
         }
         //Cascade through to connectionQueryServices too
         getQueryServices().addFunction(function);
-        return metaData;
     }
 
     @Override
-    public PMetaData addSchema(PSchema schema) throws SQLException {
-        metaData = metaData.addSchema(schema);
+    public void addSchema(PSchema schema) throws SQLException {
+        metaData.addSchema(schema);
         // Cascade through to connectionQueryServices too
         getQueryServices().addSchema(schema);
-        return metaData;
     }
 
     @Override
-    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
+    public void addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
             long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls,
             boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
                     throws SQLException {
-        metaData = metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
+        metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
                 isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, isNamespaceMapped,
                 resolvedTime);
         // Cascade through to connectionQueryServices too
         getQueryServices().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
                 isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, isNamespaceMapped,
                 resolvedTime);
-        return metaData;
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
-        metaData = metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
+    public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
+        metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
         //Cascade through to connectionQueryServices too
         getQueryServices().removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
-        return metaData;
     }
 
     @Override
-    public PMetaData removeFunction(PName tenantId, String functionName, long tableTimeStamp) throws SQLException {
-        metaData = metaData.removeFunction(tenantId, functionName, tableTimeStamp);
+    public void removeFunction(PName tenantId, String functionName, long tableTimeStamp) throws SQLException {
+        metaData.removeFunction(tenantId, functionName, tableTimeStamp);
         //Cascade through to connectionQueryServices too
         getQueryServices().removeFunction(tenantId, functionName, tableTimeStamp);
-        return metaData;
     }
 
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+    public void removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
             long tableSeqNum, long resolvedTime) throws SQLException {
-        metaData = metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
+        metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
         //Cascade through to connectionQueryServices too
         getQueryServices().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
-        return metaData;
     }
 
     protected boolean removeStatement(PhoenixStatement statement) throws SQLException {
@@ -1072,11 +1066,10 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
     }
 
     @Override
-    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
-        metaData = metaData.removeSchema(schema, schemaTimeStamp);
+    public void removeSchema(PSchema schema, long schemaTimeStamp) {
+        metaData.removeSchema(schema, schemaTimeStamp);
         // Cascade through to connectionQueryServices too
         getQueryServices().removeSchema(schema, schemaTimeStamp);
-        return metaData;
 
     }
 }
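
Worth noting: the removed constructor lines assigned this.metaData twice from
the same unpruned source, so the function pruning discarded the result of the
table pruning. With pruneTables/pruneFunctions now void and in-place, that
lost-update hazard goes away. A sketch of the difference (a string list stands
in for the metadata cache; pruneCopy/pruneInPlace are hypothetical helpers):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class PruneStyleSketch {
        // Copy-on-write style: returns a new pruned copy; the caller must keep it.
        static List<String> pruneCopy(List<String> cache, String prefix) {
            List<String> copy = new ArrayList<String>(cache);
            copy.removeIf(s -> s.startsWith(prefix));
            return copy;
        }

        // In-place style: mutates the cache, so there is no result to lose.
        static void pruneInPlace(List<String> cache, String prefix) {
            cache.removeIf(s -> s.startsWith(prefix));
        }

        public static void main(String[] args) {
            List<String> source = Arrays.asList("TBL_A", "FN_B");

            List<String> pruned = pruneCopy(new ArrayList<String>(source), "TBL_");
            pruned = pruneCopy(new ArrayList<String>(source), "FN_"); // first prune silently lost
            System.out.println(pruned);                               // [TBL_A]

            List<String> cache = new ArrayList<String>(source);
            pruneInPlace(cache, "TBL_");
            pruneInPlace(cache, "FN_");
            System.out.println(cache);                                // []
        }
    }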

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index e6fd1f6..23f6964 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -31,8 +31,6 @@ import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
 
 import java.io.IOException;
 import java.lang.ref.WeakReference;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -84,7 +82,6 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
-import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -159,8 +156,8 @@ import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PMetaDataImpl;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PSynchronizedMetaData;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ReadOnlyTableException;
@@ -181,7 +178,6 @@ import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.ConfigUtil;
-import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.JDBCUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixContextExecutor;
@@ -288,7 +284,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     private PMetaData newEmptyMetaData() {
         long maxSizeBytes = props.getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB,
                 QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE);
-        return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes);
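+        // The metadata cache is now mutated in place and shared, so wrap it
+        // in a synchronized facade.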
+        return new PSynchronizedMetaData(new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, maxSizeBytes));
     }
     
     /**
@@ -554,7 +550,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
+    public void addTable(PTable table, long resolvedTime) throws SQLException {
         synchronized (latestMetaDataLock) {
             try {
                 throwConnectionClosedIfNullMetaData();
@@ -562,26 +558,25 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 // If a client opens a connection at an earlier timestamp, this can happen
                 PTable existingTable = latestMetaData.getTableRef(new PTableKey(table.getTenantId(), table.getName().getString())).getTable();
                 if (existingTable.getTimeStamp() >= table.getTimeStamp()) {
-                    return latestMetaData;
+                    return;
                 }
             } catch (TableNotFoundException e) {}
-            latestMetaData = latestMetaData.addTable(table, resolvedTime);
+            latestMetaData.addTable(table, resolvedTime);
             latestMetaDataLock.notifyAll();
-            return latestMetaData;
         }
     }
     
-    public PMetaData updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException {
-    	synchronized (latestMetaDataLock) {
+    @Override
+    public void updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException {
+        synchronized (latestMetaDataLock) {
             throwConnectionClosedIfNullMetaData();
-            latestMetaData = latestMetaData.updateResolvedTimestamp(table, resolvedTime);
+            latestMetaData.updateResolvedTimestamp(table, resolvedTime);
             latestMetaDataLock.notifyAll();
-            return latestMetaData;
         }
     }
 
     private static interface Mutator {
-        PMetaData mutate(PMetaData metaData) throws SQLException;
+        void mutate(PMetaData metaData) throws SQLException;
     }
 
     /**
@@ -604,7 +599,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                          */
                         if (table.getSequenceNumber() + 1 == tableSeqNum) {
                             // TODO: assert that timeStamp is bigger that table timeStamp?
-                            metaData = mutator.mutate(metaData);
+                            mutator.mutate(metaData);
                             break;
                         } else if (table.getSequenceNumber() >= tableSeqNum) {
                             logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
@@ -619,7 +614,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                         logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
                         // There will never be a parentTableName here, as that would only
                        // be non null for an index and we never add/remove columns from an index.
-                        metaData = metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
+                        metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
                         break;
                     }
                     latestMetaDataLock.wait(waitTime);
@@ -637,46 +632,43 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
      }
 
 	@Override
-    public PMetaData addColumn(final PName tenantId, final String tableName, final List<PColumn> columns,
+    public void addColumn(final PName tenantId, final String tableName, final List<PColumn> columns,
             final long tableTimeStamp, final long tableSeqNum, final boolean isImmutableRows,
             final boolean isWalDisabled, final boolean isMultitenant, final boolean storeNulls,
             final boolean isTransactional, final long updateCacheFrequency, final boolean isNamespaceMapped,
             final long resolvedTime) throws SQLException {
-	    return metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
+	    metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
             @Override
-            public PMetaData mutate(PMetaData metaData) throws SQLException {
+            public void mutate(PMetaData metaData) throws SQLException {
                 try {
-                    return metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum,
+                    metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum,
                             isImmutableRows, isWalDisabled, isMultitenant, storeNulls, isTransactional,
                             updateCacheFrequency, isNamespaceMapped, resolvedTime);
                 } catch (TableNotFoundException e) {
                     // The DROP TABLE may have been processed first, so just ignore.
-                    return metaData;
                 }
             }
         });
      }
 
     @Override
-    public PMetaData removeTable(PName tenantId, final String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
+    public void removeTable(PName tenantId, final String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
         synchronized (latestMetaDataLock) {
             throwConnectionClosedIfNullMetaData();
-            latestMetaData = latestMetaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
+            latestMetaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
             latestMetaDataLock.notifyAll();
-            return latestMetaData;
         }
     }
 
     @Override
-    public PMetaData removeColumn(final PName tenantId, final String tableName, final List<PColumn> columnsToRemove, final long tableTimeStamp, final long tableSeqNum, final long resolvedTime) throws SQLException {
-        return metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
+    public void removeColumn(final PName tenantId, final String tableName, final List<PColumn> columnsToRemove, final long tableTimeStamp, final long tableSeqNum, final long resolvedTime) throws SQLException {
+        metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
             @Override
-            public PMetaData mutate(PMetaData metaData) throws SQLException {
+            public void mutate(PMetaData metaData) throws SQLException {
                 try {
-                    return metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
+                    metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
                 } catch (TableNotFoundException e) {
                     // The DROP TABLE may have been processed first, so just ignore.
-                    return metaData;
                 }
             }
         });
@@ -687,10 +679,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     public PhoenixConnection connect(String url, Properties info) throws SQLException {
         checkClosed();
         PMetaData metadata = latestMetaData;
-        if (metadata == null) {
-            throwConnectionClosedException();
-        }
-
+        throwConnectionClosedIfNullMetaData();
+        metadata = metadata.clone();
         return new PhoenixConnection(this, url, info, metadata);
     }
 
@@ -1643,9 +1633,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         PTable table;
         try {
             PMetaData metadata = latestMetaData;
-            if (metadata == null) {
-                throwConnectionClosedException();
-            }
+            throwConnectionClosedIfNullMetaData();
             table = metadata.getTableRef(new PTableKey(tenantId, fullTableName)).getTable();
             if (table.getTimeStamp() >= timestamp) { // Table in cache is newer than client timestamp which shouldn't be
                                                      // the case
@@ -2164,14 +2152,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             .build().buildException();
         }
     }
-    
+
     private HashSet<String> existingColumnFamiliesForBaseTable(PName baseTableName) throws TableNotFoundException {
-        synchronized (latestMetaDataLock) {
-            throwConnectionClosedIfNullMetaData();
-            PTable table = latestMetaData.getTableRef(new PTableKey(null, baseTableName.getString())).getTable();
-            latestMetaDataLock.notifyAll();
-            return existingColumnFamilies(table);
-        }
+        throwConnectionClosedIfNullMetaData();
+        PTable table = latestMetaData.getTableRef(new PTableKey(null, baseTableName.getString())).getTable();
+        return existingColumnFamilies(table);
     }
     
     private HashSet<String> existingColumnFamilies(PTable table) {
@@ -3362,7 +3347,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public PMetaData addFunction(PFunction function) throws SQLException {
+    public void addFunction(PFunction function) throws SQLException {
         synchronized (latestMetaDataLock) {
             try {
                 throwConnectionClosedIfNullMetaData();
@@ -3370,23 +3355,21 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 // If a client opens a connection at an earlier timestamp, this can happen
                 PFunction existingFunction = latestMetaData.getFunction(new PTableKey(function.getTenantId(), function.getFunctionName()));
                 if (existingFunction.getTimeStamp() >= function.getTimeStamp()) {
-                    return latestMetaData;
+                    return;
                 }
             } catch (FunctionNotFoundException e) {}
-            latestMetaData = latestMetaData.addFunction(function);
+            latestMetaData.addFunction(function);
             latestMetaDataLock.notifyAll();
-            return latestMetaData;
         }
     }
 
     @Override
-    public PMetaData removeFunction(PName tenantId, String function, long functionTimeStamp)
+    public void removeFunction(PName tenantId, String function, long functionTimeStamp)
             throws SQLException {
         synchronized (latestMetaDataLock) {
             throwConnectionClosedIfNullMetaData();
-            latestMetaData = latestMetaData.removeFunction(tenantId, function, functionTimeStamp);
+            latestMetaData.removeFunction(tenantId, function, functionTimeStamp);
             latestMetaDataLock.notifyAll();
-            return latestMetaData;
         }
     }
 
@@ -3642,13 +3625,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public PMetaData addSchema(PSchema schema) throws SQLException {
-        return latestMetaData = latestMetaData.addSchema(schema);
+    public void addSchema(PSchema schema) throws SQLException {
+        latestMetaData.addSchema(schema);
     }
 
     @Override
-    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
-        return latestMetaData = latestMetaData.removeSchema(schema, schemaTimeStamp);
+    public void removeSchema(PSchema schema, long schemaTimeStamp) {
+        latestMetaData.removeSchema(schema, schemaTimeStamp);
     }
 
     @Override
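
newEmptyMetaData() above now wraps the cache in PSynchronizedMetaData. A
minimal sketch of the lock-guarded delegate pattern this suggests (MetaCache
and both implementations are hypothetical stand-ins, not Phoenix types):

    import java.util.HashSet;
    import java.util.Set;

    public class SynchronizedDelegateSketch {
        interface MetaCache {
            void addTable(String name);
            int size();
        }

        // Plain, unsynchronized implementation.
        static final class SimpleMetaCache implements MetaCache {
            private final Set<String> tables = new HashSet<String>();
            @Override public void addTable(String name) { tables.add(name); }
            @Override public int size() { return tables.size(); }
        }

        // Lock-guarded delegate: every call synchronizes before touching the
        // wrapped cache, presumably the shape PSynchronizedMetaData gives PMetaData.
        static final class SynchronizedMetaCache implements MetaCache {
            private final MetaCache delegate;
            SynchronizedMetaCache(MetaCache delegate) { this.delegate = delegate; }
            @Override public synchronized void addTable(String name) { delegate.addTable(name); }
            @Override public synchronized int size() { return delegate.size(); }
        }

        public static void main(String[] args) {
            MetaCache cache = new SynchronizedMetaCache(new SimpleMetaCache());
            cache.addTable("T1");
            System.out.println(cache.size()); // 1
        }
    }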

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index f373de2..25aca74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -24,8 +24,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Map.Entry;
+import java.util.Objects;
 import java.util.Properties;
 import java.util.Set;
 
@@ -86,14 +86,13 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.SequenceUtil;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
 import org.apache.tephra.TransactionManager;
 import org.apache.tephra.TransactionSystemClient;
 import org.apache.tephra.inmemory.InMemoryTxSystemClient;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
 
 /**
  *
@@ -172,41 +171,41 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
-        return metaData = metaData.addTable(table, resolvedTime);
+    public void addTable(PTable table, long resolvedTime) throws SQLException {
+        metaData.addTable(table, resolvedTime);
     }
     
     @Override
-    public PMetaData updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
-        return metaData = metaData.updateResolvedTimestamp(table, resolvedTimestamp);
+    public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
+        metaData.updateResolvedTimestamp(table, resolvedTimestamp);
     }
 
     @Override
-    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
+    public void addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
             long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls,
             boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
                     throws SQLException {
-        return metaData = metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
+        metaData.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
                 isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, isNamespaceMapped,
                 resolvedTime);
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
+    public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
             throws SQLException {
-        return metaData = metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
+        metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
     }
 
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+    public void removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
             long tableSeqNum, long resolvedTime) throws SQLException {
-        return metaData = metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
+        metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
     }
 
     
     @Override
     public PhoenixConnection connect(String url, Properties info) throws SQLException {
-        return new PhoenixConnection(this, url, info, metaData);
+        return new PhoenixConnection(this, url, info, metaData.clone());
     }
 
     @Override
@@ -549,14 +548,14 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addFunction(PFunction function) throws SQLException {
-        return metaData = this.metaData.addFunction(function);
+    public void addFunction(PFunction function) throws SQLException {
+        this.metaData.addFunction(function);
     }
 
     @Override
-    public PMetaData removeFunction(PName tenantId, String function, long functionTimeStamp)
+    public void removeFunction(PName tenantId, String function, long functionTimeStamp)
             throws SQLException {
-        return metaData = this.metaData.removeFunction(tenantId, function, functionTimeStamp);
+        this.metaData.removeFunction(tenantId, function, functionTimeStamp);
     }
 
     @Override
@@ -615,8 +614,8 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addSchema(PSchema schema) throws SQLException {
-        return metaData = this.metaData.addSchema(schema);
+    public void addSchema(PSchema schema) throws SQLException {
+        this.metaData.addSchema(schema);
     }
 
     @Override
@@ -629,8 +628,8 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
-        return metaData = metaData.removeSchema(schema, schemaTimeStamp);
+    public void removeSchema(PSchema schema, long schemaTimeStamp) {
+        metaData.removeSchema(schema, schemaTimeStamp);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 953c73d..99ad59c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -39,7 +39,6 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
@@ -47,7 +46,6 @@ import org.apache.phoenix.schema.Sequence;
 import org.apache.phoenix.schema.SequenceAllocation;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.stats.PTableStats;
-
 import org.apache.tephra.TransactionSystemClient;
 
 
@@ -78,35 +76,35 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
-        return getDelegate().addTable(table, resolvedTime);
+    public void addTable(PTable table, long resolvedTime) throws SQLException {
+        getDelegate().addTable(table, resolvedTime);
     }
     
     @Override
-    public PMetaData updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
-        return getDelegate().updateResolvedTimestamp(table, resolvedTimestamp);
+    public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
+        getDelegate().updateResolvedTimestamp(table, resolvedTimestamp);
     }
 
     @Override
-    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
+    public void addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp,
             long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls,
             boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
                     throws SQLException {
-        return getDelegate().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
+        getDelegate().addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum, isImmutableRows,
                 isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency, isNamespaceMapped,
                 resolvedTime);
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
+    public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp)
             throws SQLException {
-        return getDelegate().removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
+        getDelegate().removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
     }
 
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+    public void removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
             long tableSeqNum, long resolvedTime) throws SQLException {
-        return getDelegate().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
+        getDelegate().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime);
     }
 
     @Override
@@ -279,14 +277,14 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addFunction(PFunction function) throws SQLException {
-        return getDelegate().addFunction(function);
+    public void addFunction(PFunction function) throws SQLException {
+        getDelegate().addFunction(function);
     }
 
     @Override
-    public PMetaData removeFunction(PName tenantId, String function, long functionTimeStamp)
+    public void removeFunction(PName tenantId, String function, long functionTimeStamp)
             throws SQLException {
-        return getDelegate().removeFunction(tenantId, function, functionTimeStamp);
+        getDelegate().removeFunction(tenantId, function, functionTimeStamp);
     }
 
     @Override
@@ -319,8 +317,8 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData addSchema(PSchema schema) throws SQLException {
-        return getDelegate().addSchema(schema);
+    public void addSchema(PSchema schema) throws SQLException {
+        getDelegate().addSchema(schema);
     }
 
     @Override
@@ -334,8 +332,8 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
-        return getDelegate().removeSchema(schema, schemaTimeStamp);
+    public void removeSchema(PSchema schema, long schemaTimeStamp) {
+        getDelegate().removeSchema(schema, schemaTimeStamp);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
index f532dc8..0b6a644 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
@@ -23,7 +23,6 @@ import java.util.List;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 
@@ -36,13 +35,13 @@ import org.apache.phoenix.schema.PTable;
  * @since 0.1
  */
 public interface MetaDataMutated {
-    PMetaData addTable(PTable table, long resolvedTime) throws SQLException;
-    PMetaData updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException;
-    PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException;
-    PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls, boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime) throws SQLException;
-    PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException;
-    PMetaData addFunction(PFunction function) throws SQLException;
-    PMetaData removeFunction(PName tenantId, String function, long functionTimeStamp) throws SQLException;
-    PMetaData addSchema(PSchema schema) throws SQLException;
-    PMetaData removeSchema(PSchema schema, long schemaTimeStamp);
+    void addTable(PTable table, long resolvedTime) throws SQLException;
+    void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException;
+    void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException;
+    void addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls, boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime) throws SQLException;
+    void removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException;
+    void addFunction(PFunction function) throws SQLException;
+    void removeFunction(PName tenantId, String function, long functionTimeStamp) throws SQLException;
+    void addSchema(PSchema schema) throws SQLException;
+    void removeSchema(PSchema schema, long schemaTimeStamp);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
index 6a710eb..cfeb13f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaData.java
@@ -30,9 +30,9 @@ public interface PMetaData extends MetaDataMutated, Iterable<PTable>, Cloneable
     public int size();
     public PMetaData clone();
     public PTableRef getTableRef(PTableKey key) throws TableNotFoundException;
-    public PMetaData pruneTables(Pruner pruner);
+    public void pruneTables(Pruner pruner);
     public PFunction getFunction(PTableKey key) throws FunctionNotFoundException;
-    public PMetaData pruneFunctions(Pruner pruner);
+    public void pruneFunctions(Pruner pruner);
     public long getAge(PTableRef ref);
     public PSchema getSchema(PTableKey key) throws SchemaNotFoundException;
 }
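
With pruning now in-place, both connect() implementations hand each new
connection metaData.clone() rather than the shared instance, and (in the next
hunk) PMetaDataImpl.clone() deep-copies the backing cache. A sketch of why the
deep copy matters (Cache is a hypothetical map-backed stand-in):

    import java.util.HashMap;
    import java.util.Map;

    public class CloneSketch {
        static final class Cache implements Cloneable {
            private Map<String, String> tables = new HashMap<String, String>();
            void put(String k, String v) { tables.put(k, v); }
            int size() { return tables.size(); }

            @Override
            public Cache clone() {
                Cache copy = new Cache();
                // Deep-copy the backing map so per-connection pruning cannot
                // disturb the shared cache (a shallow copy would share it).
                copy.tables = new HashMap<String, String>(this.tables);
                return copy;
            }
        }

        public static void main(String[] args) {
            Cache shared = new Cache();
            shared.put("T1", "v");
            Cache perConnection = shared.clone();
            perConnection.put("T2", "v"); // visible to this connection only
            System.out.println(shared.size() + " " + perConnection.size()); // 1 2
        }
    }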

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 67a2714..5ffacca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -28,7 +28,10 @@ import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TimeKeeper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.MinMaxPriorityQueue;
@@ -43,7 +46,8 @@ import com.google.common.primitives.Longs;
  *
  */
 public class PMetaDataImpl implements PMetaData {
-        private static class PMetaDataCache implements Cloneable {
+    private static final Logger logger = LoggerFactory.getLogger(PMetaDataImpl.class);
+        static class PMetaDataCache implements Cloneable {
             private static final int MIN_REMOVAL_SIZE = 3;
             private static final Comparator<PTableRef> COMPARATOR = new Comparator<PTableRef>() {
                 @Override
@@ -239,7 +243,12 @@ public class PMetaDataImpl implements PMetaData {
             }
         }
             
-    private final PMetaDataCache metaData;
+    private PMetaDataCache metaData;
+    
+    @VisibleForTesting
+    public PMetaDataCache getMetaData() {
+        return metaData;
+    }
     
     public PMetaDataImpl(int initialCapacity, long maxByteSize) {
         this.metaData = new PMetaDataCache(initialCapacity, maxByteSize, TimeKeeper.SYSTEM);
@@ -250,12 +259,12 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     private PMetaDataImpl(PMetaDataCache metaData) {
-        this.metaData = metaData.clone();
+        this.metaData = metaData;
     }
     
     @Override
     public PMetaDataImpl clone() {
-        return new PMetaDataImpl(this.metaData);
+        return new PMetaDataImpl(new PMetaDataCache(this.metaData));
     }
     
     @Override
@@ -282,14 +291,12 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public PMetaData updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
-    	PMetaDataCache clone = metaData.clone();
-    	clone.putDuplicate(table.getKey(), table, resolvedTimestamp);
-    	return new PMetaDataImpl(clone);
+    public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
+    	metaData.putDuplicate(table.getKey(), table, resolvedTimestamp);
     }
 
     @Override
-    public PMetaData addTable(PTable table, long resolvedTime) throws SQLException {
+    public void addTable(PTable table, long resolvedTime) throws SQLException {
         int netGain = 0;
         PTableKey key = table.getKey();
         PTableRef oldTableRef = metaData.get(key);
@@ -323,28 +330,27 @@ public class PMetaDataImpl implements PMetaData {
             netGain += table.getEstimatedSize();
         }
         long overage = metaData.getCurrentSize() + netGain - metaData.getMaxSize();
-        PMetaDataCache newMetaData = overage <= 0 ? metaData.clone() : metaData.cloneMinusOverage(overage);
+        metaData = overage <= 0 ? metaData : metaData.cloneMinusOverage(overage);
         
         if (newParentTable != null) { // Upsert new index table into parent data table list
-            newMetaData.put(newParentTable.getKey(), newParentTable, parentResolvedTimestamp);
-            newMetaData.putDuplicate(table.getKey(), table, resolvedTime);
+            metaData.put(newParentTable.getKey(), newParentTable, parentResolvedTimestamp);
+            metaData.putDuplicate(table.getKey(), table, resolvedTime);
         } else {
-            newMetaData.put(table.getKey(), table, resolvedTime);
+            metaData.put(table.getKey(), table, resolvedTime);
         }
         for (PTable index : table.getIndexes()) {
-            newMetaData.putDuplicate(index.getKey(), index, resolvedTime);
+            metaData.putDuplicate(index.getKey(), index, resolvedTime);
         }
-        return new PMetaDataImpl(newMetaData);
     }
 
     @Override
-    public PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp,
+    public void addColumn(PName tenantId, String tableName, List<PColumn> columnsToAdd, long tableTimeStamp,
             long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls,
             boolean isTransactional, long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
                     throws SQLException {
         PTableRef oldTableRef = metaData.get(new PTableKey(tenantId, tableName));
         if (oldTableRef == null) {
-            return this;
+            return;
         }
         List<PColumn> oldColumns = PTableImpl.getColumnsToClone(oldTableRef.getTable());
         List<PColumn> newColumns;
@@ -358,12 +364,11 @@ public class PMetaDataImpl implements PMetaData {
         PTable newTable = PTableImpl.makePTable(oldTableRef.getTable(), tableTimeStamp, tableSeqNum, newColumns,
                 isImmutableRows, isWalDisabled, isMultitenant, storeNulls, isTransactional, updateCacheFrequency,
                 isNamespaceMapped);
-        return addTable(newTable, resolvedTime);
+        addTable(newTable, resolvedTime);
     }
 
     @Override
-    public PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
-        PMetaDataCache tables = null;
+    public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException {
         PTableRef parentTableRef = null;
         PTableKey key = new PTableKey(tenantId, tableName);
         if (metaData.get(key) == null) {
@@ -371,16 +376,15 @@ public class PMetaDataImpl implements PMetaData {
                 parentTableRef = metaData.get(new PTableKey(tenantId, parentTableName));
             }
             if (parentTableRef == null) {
-                return this;
+                return;
             }
         } else {
-            tables = metaData.clone();
-            PTable table = tables.remove(key);
+            PTable table = metaData.remove(key);
             for (PTable index : table.getIndexes()) {
-                tables.remove(index.getKey());
+                metaData.remove(index.getKey());
             }
             if (table.getParentName() != null) {
-                parentTableRef = tables.get(new PTableKey(tenantId, table.getParentName().getString()));
+                parentTableRef = metaData.get(new PTableKey(tenantId, table.getParentName().getString()));
             }
         }
         // also remove its reference from parent table
@@ -397,26 +401,22 @@ public class PMetaDataImpl implements PMetaData {
                                 parentTableRef.getTable(),
                                 tableTimeStamp == HConstants.LATEST_TIMESTAMP ? parentTableRef.getTable().getTimeStamp() : tableTimeStamp,
                                 newIndexes);
-                        if (tables == null) { 
-                            tables = metaData.clone();
-                        }
-                        tables.put(parentTable.getKey(), parentTable, parentTableRef.getResolvedTimeStamp());
+                        metaData.put(parentTable.getKey(), parentTable, parentTableRef.getResolvedTimeStamp());
                         break;
                     }
                 }
             }
         }
-        return tables == null ? this : new PMetaDataImpl(tables);
     }
     
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException {
+    public void removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException {
         PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName));
         if (tableRef == null) {
-            return this;
+            return;
         }
         PTable table = tableRef.getTable();
-        PMetaDataCache tables = metaData.clone();
+        PMetaDataCache tables = metaData;
         for (PColumn columnToRemove : columnsToRemove) {
             PColumn column;
             String familyName = columnToRemove.getFamilyName().getString();
@@ -445,25 +445,21 @@ public class PMetaDataImpl implements PMetaData {
             table = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
         }
         tables.put(table.getKey(), table, resolvedTime);
-        return new PMetaDataImpl(tables);
     }
 
     @Override
-    public PMetaData pruneTables(Pruner pruner) {
+    public void pruneTables(Pruner pruner) {
         List<PTableKey> keysToPrune = Lists.newArrayListWithExpectedSize(this.size());
         for (PTable table : this) {
             if (pruner.prune(table)) {
                 keysToPrune.add(table.getKey());
             }
         }
-        if (keysToPrune.isEmpty()) {
-            return this;
-        }
-        PMetaDataCache tables = metaData.clone();
-        for (PTableKey key : keysToPrune) {
-            tables.remove(key);
+        if (!keysToPrune.isEmpty()) {
+            for (PTableKey key : keysToPrune) {
+                metaData.remove(key);
+            }
         }
-        return new PMetaDataImpl(tables);
     }
 
     @Override
@@ -472,35 +468,29 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public PMetaData addFunction(PFunction function) throws SQLException {
+    public void addFunction(PFunction function) throws SQLException {
         this.metaData.functions.put(function.getKey(), function);
-        return this;
     }
 
     @Override
-    public PMetaData removeFunction(PName tenantId, String function, long functionTimeStamp)
+    public void removeFunction(PName tenantId, String function, long functionTimeStamp)
             throws SQLException {
         this.metaData.functions.remove(new PTableKey(tenantId, function));
-        return this;
     }
 
     @Override
-    public PMetaData pruneFunctions(Pruner pruner) {
+    public void pruneFunctions(Pruner pruner) {
         List<PTableKey> keysToPrune = Lists.newArrayListWithExpectedSize(this.size());
         for (PFunction function : this.metaData.functions.values()) {
             if (pruner.prune(function)) {
                 keysToPrune.add(function.getKey());
             }
         }
-        if (keysToPrune.isEmpty()) {
-            return this;
-        }
-        PMetaDataCache clone = metaData.clone();
-        for (PTableKey key : keysToPrune) {
-            clone.functions.remove(key);
+        if (!keysToPrune.isEmpty()) {
+            for (PTableKey key : keysToPrune) {
+                metaData.functions.remove(key);
+            }
         }
-        return new PMetaDataImpl(clone);
-    
     }
 
     @Override
@@ -509,9 +499,8 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public PMetaData addSchema(PSchema schema) throws SQLException {
+    public void addSchema(PSchema schema) throws SQLException {
         this.metaData.schemas.put(schema.getSchemaKey(), schema);
-        return this;
     }
 
     @Override
@@ -522,8 +511,8 @@ public class PMetaDataImpl implements PMetaData {
     }
 
     @Override
-    public PMetaData removeSchema(PSchema schema, long schemaTimeStamp) {
+    public void removeSchema(PSchema schema, long schemaTimeStamp) {
         this.metaData.schemas.remove(SchemaUtil.getSchemaKey(schema.getSchemaName()));
-        return this;
     }
+
 }
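
Taken together, the PMetaDataImpl changes invert where copying happens: the mutators (addTable, removeTable, addColumn, pruneTables, and so on) now edit the live cache directly, and the one remaining deep copy sits in clone(), via the PMetaDataCache copy constructor. A stand-in sketch of that cost model (hypothetical MetaCache class, not the Phoenix one):

    import java.util.HashMap;
    import java.util.Map;

    class MetaCache {
        private final Map<String, Long> resolvedTimes;

        MetaCache() { this.resolvedTimes = new HashMap<>(); }

        // Copy constructor: the one place a deep copy happens now, mirroring
        // "new PMetaDataCache(this.metaData)" in PMetaDataImpl.clone() above.
        MetaCache(MetaCache other) { this.resolvedTimes = new HashMap<>(other.resolvedTimes); }

        // Mutators edit in place; no per-call clone any more.
        void put(String table, long resolvedTime) { resolvedTimes.put(table, resolvedTime); }
        Long get(String table) { return resolvedTimes.get(table); }

        MetaCache copy() { return new MetaCache(this); }
    }

    public class CloneCost {
        public static void main(String[] args) {
            MetaCache live = new MetaCache();
            live.put("T", 1L);
            MetaCache snapshot = live.copy(); // explicit, one-time copy
            live.put("T", 2L);                // cheap in-place update
            System.out.println(snapshot.get("T") + " vs " + live.get("T")); // 1 vs 2
        }
    }

With a large number of views, each view update used to clone the whole cache; after this change only explicit clone() calls pay that cost.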

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c22020a4/phoenix-core/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java
new file mode 100644
index 0000000..af4bc60
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import java.sql.SQLException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import javax.annotation.concurrent.GuardedBy;
+
+import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PSchema;
+
+public class PSynchronizedMetaData implements PMetaData {
+
+    @GuardedBy("readWriteLock")
+    private PMetaData delegate;
+    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+
+    public PSynchronizedMetaData(PMetaData metadata) {
+        this.delegate = metadata;
+    }
+    
+    @Override
+    public Iterator<PTable> iterator() {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.iterator();
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public int size() {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.size();
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public PMetaData clone() {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.clone();
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public void addTable(PTable table, long resolvedTime) throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.addTable(table, resolvedTime);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public PTableRef getTableRef(PTableKey key) throws TableNotFoundException {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.getTableRef(key);
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.updateResolvedTimestamp(table, resolvedTimestamp);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public void pruneTables(Pruner pruner) {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.pruneTables(pruner);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public PFunction getFunction(PTableKey key) throws FunctionNotFoundException {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.getFunction(key);
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public void removeTable(PName tenantId, String tableName, String parentTableName,
+            long tableTimeStamp) throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.removeTable(tenantId, tableName, parentTableName, tableTimeStamp);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public void pruneFunctions(Pruner pruner) {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.pruneFunctions(pruner);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public long getAge(PTableRef ref) {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.getAge(ref);
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public void addColumn(PName tenantId, String tableName, List<PColumn> columns,
+            long tableTimeStamp, long tableSeqNum, boolean isImmutableRows, boolean isWalDisabled,
+            boolean isMultitenant, boolean storeNulls, boolean isTransactional,
+            long updateCacheFrequency, boolean isNamespaceMapped, long resolvedTime)
+            throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.addColumn(tenantId, tableName, columns, tableTimeStamp, tableSeqNum,
+                isImmutableRows, isWalDisabled, isMultitenant, storeNulls, isTransactional,
+                updateCacheFrequency, isNamespaceMapped, resolvedTime);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public PSchema getSchema(PTableKey key) throws SchemaNotFoundException {
+        readWriteLock.readLock().lock();
+        try {
+            return delegate.getSchema(key);
+        }
+        finally {
+            readWriteLock.readLock().unlock();
+        }
+    }
+
+    @Override
+    public void removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove,
+            long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum,
+                resolvedTime);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public void addFunction(PFunction function) throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.addFunction(function);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public void removeFunction(PName tenantId, String function, long functionTimeStamp)
+            throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.removeFunction(tenantId, function, functionTimeStamp);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public void addSchema(PSchema schema) throws SQLException {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.addSchema(schema);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    @Override
+    public void removeSchema(PSchema schema, long schemaTimeStamp) {
+        readWriteLock.writeLock().lock();
+        try {
+            delegate.removeSchema(schema, schemaTimeStamp);
+        }
+        finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+}
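
With PMetaDataImpl now mutating in place, thread safety is provided by this new wrapper: every read method takes the shared read lock, every mutator takes the exclusive write lock, and all work is delegated. The pattern in isolation, as a self-contained sketch (hypothetical Registry types, not Phoenix code):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    interface Registry {
        String get(String key);
        void put(String key, String value);
    }

    class SimpleRegistry implements Registry {
        private final Map<String, String> map = new HashMap<>();
        public String get(String key) { return map.get(key); }
        public void put(String key, String value) { map.put(key, value); }
    }

    // Same shape as PSynchronizedMetaData: reads share, writes exclude.
    class SynchronizedRegistry implements Registry {
        private final Registry delegate;
        private final ReadWriteLock lock = new ReentrantReadWriteLock();

        SynchronizedRegistry(Registry delegate) { this.delegate = delegate; }

        public String get(String key) {
            lock.readLock().lock();
            try {
                return delegate.get(key);
            } finally {
                lock.readLock().unlock();
            }
        }

        public void put(String key, String value) {
            lock.writeLock().lock();
            try {
                delegate.put(key, value);
            } finally {
                lock.writeLock().unlock();
            }
        }
    }

    public class WrapperDemo {
        public static void main(String[] args) {
            Registry r = new SynchronizedRegistry(new SimpleRegistry());
            r.put("T1", "ddl");
            System.out.println(r.get("T1")); // ddl
        }
    }

Note that, as with iterator() above, the lock only guards obtaining a value or iterator, not anything the caller does with it afterwards.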


[10/16] phoenix git commit: PHOENIX-2995 Write performance severely degrades with large number of views (addendum)

Posted by td...@apache.org.
 PHOENIX-2995 Write performance severely degrades with large number of views (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/98e665ff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/98e665ff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/98e665ff

Branch: refs/heads/4.x-HBase-1.0
Commit: 98e665fffae832cb82a475a14efa02ceb228efc3
Parents: c22020a
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Wed Aug 17 11:55:12 2016 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Wed Aug 17 11:55:12 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java   | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/98e665ff/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 0d09e75..d488eca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -113,8 +113,6 @@ import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.tephra.TransactionContext;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;


[04/16] phoenix git commit: PHOENIX-3152 Incorrect comparator in QueryOptimizer may cause IllegalArgumentException

Posted by td...@apache.org.
PHOENIX-3152 Incorrect comparator in QueryOptimizer may cause IllegalArgumentException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/62d45662
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/62d45662
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/62d45662

Branch: refs/heads/4.x-HBase-1.0
Commit: 62d45662be555701a0ca66ea5eb5d920b4b15e04
Parents: 6198bf7
Author: Sergey Soldatov <ss...@apache.org>
Authored: Sat Aug 6 16:31:13 2016 -0700
Committer: Sergey Soldatov <ss...@apache.org>
Committed: Sat Aug 6 19:46:23 2016 -0700

----------------------------------------------------------------------
 .../apache/phoenix/optimize/QueryOptimizer.java | 21 +++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d45662/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index e4198ee..bd9c811 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -383,35 +383,38 @@ public class QueryOptimizer {
                 // For shared indexes (i.e. indexes on views and local indexes),
                 // a) add back any view constants as these won't be in the index, and
                 // b) ignore the viewIndexId which will be part of the row key columns.
-                int c = (plan2.getContext().getScanRanges().getBoundPkColumnCount() + (table2.getViewIndexId() == null ? 0 : (boundRanges - 1))) - 
+                int c = (plan2.getContext().getScanRanges().getBoundPkColumnCount() + (table2.getViewIndexId() == null ? 0 : (boundRanges - 1))) -
                         (plan1.getContext().getScanRanges().getBoundPkColumnCount() + (table1.getViewIndexId() == null ? 0 : (boundRanges - 1)));
                 if (c != 0) return c;
-                if (plan1.getGroupBy()!=null && plan2.getGroupBy()!=null) {
+                if (plan1.getGroupBy() != null && plan2.getGroupBy() != null) {
                     if (plan1.getGroupBy().isOrderPreserving() != plan2.getGroupBy().isOrderPreserving()) {
                         return plan1.getGroupBy().isOrderPreserving() ? -1 : 1;
-                    } 
+                    }
                 }
                 // Use smaller table (table with fewest kv columns)
                 c = (table1.getColumns().size() - table1.getPKColumns().size()) - (table2.getColumns().size() - table2.getPKColumns().size());
                 if (c != 0) return c;
-                
+
                 // If all things are equal, don't choose local index as it forces scan
                 // on every region (unless there's no start/stop key)
-                if (table1.getIndexType() == IndexType.LOCAL) {
+
+                if (table1.getIndexType() == IndexType.LOCAL && table2.getIndexType() !=
+                        IndexType.LOCAL) {
                     return plan1.getContext().getScanRanges().getRanges().isEmpty() ? -1 : 1;
                 }
-                if (table2.getIndexType() == IndexType.LOCAL) {
+                if (table2.getIndexType() == IndexType.LOCAL && table1.getIndexType() !=
+                        IndexType.LOCAL) {
                     return plan2.getContext().getScanRanges().getRanges().isEmpty() ? 1 : -1;
                 }
 
                 // All things being equal, just use the table based on the Hint.USE_DATA_OVER_INDEX_TABLE
-                if (table1.getType() == PTableType.INDEX) {
+
+                if (table1.getType() == PTableType.INDEX && table2.getType() != PTableType.INDEX) {
                     return comparisonOfDataVersusIndexTable;
                 }
-                if (table2.getType() == PTableType.INDEX) {
+                if (table2.getType() == PTableType.INDEX && table1.getType() != PTableType.INDEX) {
                     return -comparisonOfDataVersusIndexTable;
                 }
-                
                 return 0;
             }
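
Why this fixes the IllegalArgumentException: java.util.Comparator requires sgn(compare(a, b)) == -sgn(compare(b, a)), and TimSort (used by Collections.sort since JDK 7) detects violations and throws IllegalArgumentException ("Comparison method violates its general contract!"). Before the patch, when both plans used local indexes, both argument orders could return the same sign. A stripped-down illustration of the broken and fixed shapes (booleans stand in for the index-type checks; not Phoenix code):

    import java.util.Comparator;

    public class ComparatorContract {
        static class Plan {
            final boolean localIndex;
            Plan(boolean localIndex) { this.localIndex = localIndex; }
        }

        // Broken shape: with two local-index plans (and bound scan ranges),
        // this returns 1 for BOTH argument orders -- an asymmetric comparator.
        static final Comparator<Plan> BROKEN = (p1, p2) -> {
            if (p1.localIndex) return 1;
            if (p2.localIndex) return -1;
            return 0;
        };

        // Fixed shape, mirroring the patch: only demote a local index when
        // the other side is not local, so the result is symmetric.
        static final Comparator<Plan> FIXED = (p1, p2) -> {
            if (p1.localIndex && !p2.localIndex) return 1;
            if (p2.localIndex && !p1.localIndex) return -1;
            return 0;
        };

        public static void main(String[] args) {
            Plan a = new Plan(true), b = new Plan(true);
            System.out.println(BROKEN.compare(a, b)); // 1
            System.out.println(BROKEN.compare(b, a)); // 1  <- violates symmetry
            System.out.println(FIXED.compare(a, b));  // 0
            System.out.println(FIXED.compare(b, a));  // 0
        }
    }

The same guard is applied to the INDEX-versus-INDEX case for the USE_DATA_OVER_INDEX_TABLE hint, for the same reason.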