Posted to commits@phoenix.apache.org by pb...@apache.org on 2018/04/19 21:02:15 UTC

[1/5] phoenix git commit: PHOENIX-4690 GroupBy expressions should follow the order of PK Columns if GroupBy is orderPreserving [Forced Update!]

Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.12 0860dec72 -> 3fc3c5f3f (forced update)


PHOENIX-4690 GroupBy expressions should follow the order of PK Columns if GroupBy is orderPreserving
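
In plain terms: when every GROUP BY expression is bound to a PK column and the scan already returns rows in row-key order, the compiler can now reorder the group-by expressions to match PK order instead of the written order. A minimal sketch of the effect, assuming a reachable Phoenix cluster; the JDBC URL and table name below are illustrative, not part of the commit:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class GroupByOrderSketch {
        public static void main(String[] args) throws Exception {
            // Assumed URL; any reachable Phoenix quorum behaves the same way.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
                conn.createStatement().execute(
                    "CREATE TABLE T4690 (pk1 INTEGER NOT NULL, pk2 INTEGER NOT NULL, v INTEGER, " +
                    "CONSTRAINT pk PRIMARY KEY (pk1, pk2))");
                // GROUP BY names the PK columns out of written order. With this fix
                // an order-preserving GROUP BY is rewritten to PK order [PK1, PK2],
                // so the trailing ORDER BY pk1, pk2 needs no client-side sort.
                try (ResultSet rs = conn.createStatement().executeQuery(
                        "SELECT pk1, pk2, COUNT(v) FROM T4690 " +
                        "GROUP BY pk2, pk1 ORDER BY pk1, pk2")) {
                    while (rs.next()) {
                        System.out.println(rs.getInt(1) + "," + rs.getInt(2) + "," + rs.getLong(3));
                    }
                }
            }
        }
    }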


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3939c8dd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3939c8dd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3939c8dd

Branch: refs/heads/4.x-cdh5.12
Commit: 3939c8dd936dfb788dd019d34e5587d443dd3c1d
Parents: 9fc11de
Author: chenglei <ch...@apache.org>
Authored: Wed Apr 18 17:59:38 2018 +0800
Committer: chenglei <ch...@apache.org>
Committed: Wed Apr 18 17:59:38 2018 +0800

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/AggregateIT.java | 109 +++++++++++++++---
 .../org/apache/phoenix/end2end/OrderByIT.java   |  18 +--
 .../apache/phoenix/end2end/join/SubqueryIT.java |   8 +-
 .../join/SubqueryUsingSortMergeJoinIT.java      |  12 +-
 .../apache/phoenix/compile/GroupByCompiler.java |  19 +++-
 .../phoenix/compile/OrderPreservingTracker.java |  13 +++
 .../phoenix/compile/QueryCompilerTest.java      | 110 +++++++++++++++++++
 .../java/org/apache/phoenix/util/TestUtil.java  |  20 ++++
 8 files changed, 268 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
index fd1d660..2059311 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.apache.phoenix.util.TestUtil.assertResultSet;
 
 import java.io.IOException;
 import java.sql.Connection;
@@ -1016,20 +1017,100 @@ public class AggregateIT extends ParallelStatsDisabledIT {
         }
     }
 
-    private void assertResultSet(ResultSet rs,Object[][] rows) throws Exception {
-        for(int rowIndex=0;rowIndex<rows.length;rowIndex++) {
-            assertTrue(rs.next());
-            for(int columnIndex=1;columnIndex<= rows[rowIndex].length;columnIndex++) {
-                Object realValue=rs.getObject(columnIndex);
-                Object expectedValue=rows[rowIndex][columnIndex-1];
-                if(realValue==null) {
-                    assertTrue(expectedValue==null);
-                }
-                else {
-                    assertTrue(realValue.equals(expectedValue));
-                }
+    @Test
+    public void testGroupByOrderMatchPkColumnOrderBug4690() throws Exception {
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, false);
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, true);
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, false);
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, true);
+    }
+
+    private void doTestGroupByOrderMatchPkColumnOrderBug4690(boolean desc ,boolean salted) throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = null;
+        try {
+            conn = DriverManager.getConnection(getUrl(), props);
+            String tableName = generateUniqueName();
+            String sql = "create table " + tableName + "( "+
+                    " pk1 integer not null , " +
+                    " pk2 integer not null, " +
+                    " pk3 integer not null," +
+                    " pk4 integer not null,"+
+                    " v integer, " +
+                    " CONSTRAINT TEST_PK PRIMARY KEY ( "+
+                       "pk1 "+(desc ? "desc" : "")+", "+
+                       "pk2 "+(desc ? "desc" : "")+", "+
+                       "pk3 "+(desc ? "desc" : "")+", "+
+                       "pk4 "+(desc ? "desc" : "")+
+                    " )) "+(salted ? "SALT_BUCKETS =4" : "split on(2)");
+            conn.createStatement().execute(sql);
+
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (1,8,10,20,30)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (1,8,11,21,31)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (1,9,5 ,22,32)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (1,9,6 ,12,33)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (1,9,6 ,13,34)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (1,9,7 ,8,35)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (2,3,15,25,35)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (2,7,16,26,36)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (2,7,17,27,37)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (3,2,18,28,38)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES (3,2,19,29,39)");
+            conn.commit();
+
+            sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2,pk1";
+            ResultSet rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{2,3,2L},{3,2,1L},{7,2,2L},{8,1,2L},{9,1,4L}});
+
+            sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1,pk2";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{1,8,2L},{1,9,4L},{2,3,1L},{2,7,2L},{3,2,2L}});
+
+            sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2 desc,pk1 desc";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{9,1,4L},{8,1,2L},{7,2,2L},{3,2,1L},{2,3,2L}});
+
+            sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1 desc,pk2 desc";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{3,2,2L},{2,7,2L},{2,3,1L},{1,9,4L},{1,8,2L}});
+
+
+            sql = "select pk3,pk2,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk3,pk2";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{5,9,1L},{6,9,2L},{7,9,1L},{10,8,1L},{11,8,1L}});
+
+            sql = "select pk2,pk3,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk2,pk3";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{8,10,1L},{8,11,1L},{9,5,1L},{9,6,2L},{9,7,1L}});
+
+            sql = "select pk3,pk2,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk3 desc,pk2 desc";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{11,8,1L},{10,8,1L},{7,9,1L},{6,9,2L},{5,9,1L}});
+
+            sql = "select pk2,pk3,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk2 desc,pk3 desc";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{9,7,1L},{9,6,2L},{9,5,1L},{8,11,1L},{8,10,1L}});
+
+
+            sql = "select pk4,pk3,pk1,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk4,pk3,pk1";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{8,7,1,1L},{12,6,1,1L},{13,6,1,1L},{22,5,1,1L}});
+
+            sql = "select pk1,pk3,pk4,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk1,pk3,pk4";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{1,5,22,1L},{1,6,12,1L},{1,6,13,1L},{1,7,8,1L}});
+
+            sql = "select pk4,pk3,pk1,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk4 desc,pk3 desc,pk1 desc";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{22,5,1,1L},{13,6,1,1L},{12,6,1,1L},{8,7,1,1L}});
+
+            sql = "select pk1,pk3,pk4,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk1 desc,pk3 desc,pk4 desc";
+            rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{1,7,8,1L},{1,6,13,1L},{1,6,12,1L},{1,5,22,1L}});
+        } finally {
+            if(conn != null) {
+                conn.close();
             }
         }
-        assertTrue(!rs.next());
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
index 3bce9c7..9d6a450 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
@@ -30,6 +30,7 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.apache.phoenix.util.TestUtil.assertResultSet;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -1180,21 +1181,4 @@ public class OrderByIT extends ParallelStatsDisabledIT {
         }
     }
 
-    private void assertResultSet(ResultSet rs,Object[][] rows) throws Exception {
-        for(int rowIndex=0;rowIndex<rows.length;rowIndex++) {
-            assertTrue(rs.next());
-            for(int columnIndex=1;columnIndex<= rows[rowIndex].length;columnIndex++) {
-                Object realValue=rs.getObject(columnIndex);
-                Object expectedValue=rows[rowIndex][columnIndex-1];
-                if(realValue==null) {
-                    assertTrue(expectedValue==null);
-                }
-                else {
-                    assertTrue(realValue.equals(expectedValue));
-                }
-            }
-        }
-        assertTrue(!rs.next());
-    }
-
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryIT.java
index 1da98d2..4a11b41 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryIT.java
@@ -148,7 +148,7 @@ public class SubqueryIT extends BaseJoinIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -156,7 +156,7 @@ public class SubqueryIT extends BaseJoinIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -220,7 +220,7 @@ public class SubqueryIT extends BaseJoinIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ITEM_TABLE_FULL_NAME + " \\[1\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" + 
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
@@ -229,7 +229,7 @@ public class SubqueryIT extends BaseJoinIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ITEM_TABLE_FULL_NAME + " \\[1\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" + 
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryUsingSortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryUsingSortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryUsingSortMergeJoinIT.java
index 9335065..665908f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryUsingSortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/SubqueryUsingSortMergeJoinIT.java
@@ -136,7 +136,8 @@ public class SubqueryUsingSortMergeJoinIT extends BaseJoinIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "        CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n"+
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -145,7 +146,8 @@ public class SubqueryUsingSortMergeJoinIT extends BaseJoinIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "    CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n"+
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -200,8 +202,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseJoinIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ITEM_TABLE_FULL_NAME + " \\[1\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "        CLIENT MERGE SORT\n" + 
+                "        CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -210,8 +213,9 @@ public class SubqueryUsingSortMergeJoinIT extends BaseJoinIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ITEM_TABLE_FULL_NAME + " \\[1\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" + 
+                "    CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_FULL_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
index a405317..0a9e1bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
@@ -153,9 +153,24 @@ public class GroupByCompiler {
                 // column and use each subsequent one in PK order).
                 isOrderPreserving = tracker.isOrderPreserving();
                 orderPreservingColumnCount = tracker.getOrderPreservingColumnCount();
+                if(isOrderPreserving) {
+                    //reorder the groupby expressions following pk columns
+                    List<Expression> newExpressions = tracker.getExpressionsFromOrderPreservingTrackInfos();
+                    assert newExpressions.size() == expressions.size();
+                    return new GroupBy.GroupByBuilder(this)
+                               .setIsOrderPreserving(isOrderPreserving)
+                               .setOrderPreservingColumnCount(orderPreservingColumnCount)
+                               .setExpressions(newExpressions)
+                               .setKeyExpressions(newExpressions)
+                               .build();
+                }
             }
-            if (isOrderPreserving || isUngroupedAggregate) {
-                return new GroupBy.GroupByBuilder(this).setIsOrderPreserving(isOrderPreserving).setOrderPreservingColumnCount(orderPreservingColumnCount).build();
+
+            if (isUngroupedAggregate) {
+                return new GroupBy.GroupByBuilder(this)
+                           .setIsOrderPreserving(isOrderPreserving)
+                           .setOrderPreservingColumnCount(orderPreservingColumnCount)
+                           .build();
             }
             List<Expression> expressions = Lists.newArrayListWithExpectedSize(this.expressions.size());
             List<Expression> keyExpressions = expressions;
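
The reordering itself lives in OrderPreservingTracker (next hunk): each tracked group-by expression records the PK position it is bound to, and the new getExpressionsFromOrderPreservingTrackInfos() returns the expressions in that position order. A minimal stand-alone sketch of the idea, with TrackInfo standing in for the tracker's Info and String for Expression:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class ReorderSketch {
        static class TrackInfo {
            final int pkPosition;    // position of the bound PK column
            final String expression; // stand-in for org.apache.phoenix.expression.Expression
            TrackInfo(int pkPosition, String expression) {
                this.pkPosition = pkPosition;
                this.expression = expression;
            }
        }

        // Sort tracked infos by PK position, then project out the expressions.
        static List<String> reorderToPkOrder(List<TrackInfo> infos) {
            List<TrackInfo> sorted = new ArrayList<>(infos);
            sorted.sort(Comparator.comparingInt(i -> i.pkPosition));
            List<String> expressions = new ArrayList<>(sorted.size());
            for (TrackInfo info : sorted) {
                expressions.add(info.expression);
            }
            return expressions;
        }

        public static void main(String[] args) {
            // GROUP BY pk2, pk1 arrives in written order; sorting restores PK order.
            List<TrackInfo> infos = Arrays.asList(
                new TrackInfo(1, "PK2"), new TrackInfo(0, "PK1"));
            System.out.println(reorderToPkOrder(infos)); // prints [PK1, PK2]
        }
    }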

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
index dab2078..d1175f6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
@@ -9,6 +9,7 @@
  */
 package org.apache.phoenix.compile;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Iterator;
@@ -49,6 +50,7 @@ public class OrderPreservingTracker {
         public final OrderPreserving orderPreserving;
         public final int pkPosition;
         public final int slotSpan;
+        public Expression expression;
 
         public Info(int pkPosition) {
             this.pkPosition = pkPosition;
@@ -141,6 +143,7 @@ public class OrderPreservingTracker {
                         return;
                     }
                 }
+                info.expression = node;
                 orderPreservingInfos.add(info);
             }
         }
@@ -153,6 +156,16 @@ public class OrderPreservingTracker {
         return orderPreservingColumnCount;
     }
 
+    public List<Expression> getExpressionsFromOrderPreservingTrackInfos() {
+        assert isOrderPreserving;
+        assert (this.orderPreservingInfos != null && this.orderPreservingInfos.size() > 0);
+        List<Expression> newExpressions = new ArrayList<Expression>(this.orderPreservingInfos.size());
+        for(Info trackInfo : this.orderPreservingInfos) {
+            newExpressions.add(trackInfo.expression);
+        }
+        return newExpressions;
+    }
+
     public boolean isOrderPreserving() {
         if (!isOrderPreserving) {
             return false;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index bac4ee8..07bd3a9 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -98,6 +98,7 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.Ignore;
 import org.junit.Test;
 
@@ -4850,4 +4851,113 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
             return Collections.emptyList();
         }
     }
+
+    @Test
+    public void testGroupByOrderMatchPkColumnOrder4690() throws Exception{
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, false);
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, true);
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, false);
+        this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, true);
+    }
+
+    private void doTestGroupByOrderMatchPkColumnOrderBug4690(boolean desc ,boolean salted) throws Exception {
+        Connection conn = null;
+        try {
+            conn = DriverManager.getConnection(getUrl());
+            String tableName = generateUniqueName();
+            String sql = "create table " + tableName + "( "+
+                    " pk1 integer not null , " +
+                    " pk2 integer not null, " +
+                    " pk3 integer not null," +
+                    " pk4 integer not null,"+
+                    " v integer, " +
+                    " CONSTRAINT TEST_PK PRIMARY KEY ( "+
+                       "pk1 "+(desc ? "desc" : "")+", "+
+                       "pk2 "+(desc ? "desc" : "")+", "+
+                       "pk3 "+(desc ? "desc" : "")+", "+
+                       "pk4 "+(desc ? "desc" : "")+
+                    " )) "+(salted ? "SALT_BUCKETS =4" : "split on(2)");
+            conn.createStatement().execute(sql);
+
+            sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2,pk1";
+            QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() ==2);
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK2"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK1"));
+
+            sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1,pk2";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY));
+
+            sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2 desc,pk1 desc";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() ==2);
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK2 DESC"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK1 DESC"));
+
+            sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1 desc,pk2 desc";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY));
+
+
+            sql = "select pk3,pk2,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk3,pk2";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2);
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK2"));
+
+            sql = "select pk2,pk3,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk2,pk3";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY));
+
+            sql = "select pk3,pk2,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk3 desc,pk2 desc";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2);
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3 DESC"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK2 DESC"));
+
+            sql = "select pk2,pk3,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk2 desc,pk3 desc";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY));
+
+
+            sql = "select pk4,pk3,pk1,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk4,pk3,pk1";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 3);
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK4"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK3"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(2).toString().equals("PK1"));
+
+            sql = "select pk1,pk3,pk4,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk1,pk3,pk4";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY));
+
+            sql = "select pk4,pk3,pk1,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk4 desc,pk3 desc,pk1 desc";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 3);
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK4 DESC"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK3 DESC"));
+            assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(2).toString().equals("PK1 DESC"));
+
+            sql = "select pk1,pk3,pk4,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk1 desc,pk3 desc,pk4 desc";
+            queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            assertTrue(queryPlan.getGroupBy().isOrderPreserving());
+            assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY));
+        } finally {
+            if(conn != null) {
+                conn.close();
+            }
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3939c8dd/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index a06fd69..277e257 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -1043,4 +1043,24 @@ public class TestUtil {
         return queryPlan;
     }
 
+    public static void assertResultSet(ResultSet rs,Object[][] rows) throws Exception {
+        for(int rowIndex=0; rowIndex < rows.length; rowIndex++) {
+            assertTrue("rowIndex:["+rowIndex+"] rs.next error!",rs.next());
+            for(int columnIndex = 1; columnIndex <= rows[rowIndex].length; columnIndex++) {
+                Object realValue = rs.getObject(columnIndex);
+                Object expectedValue = rows[rowIndex][columnIndex-1];
+                if(realValue == null) {
+                    assertNull("rowIndex:["+rowIndex+"],columnIndex:["+columnIndex+"]",expectedValue);
+                }
+                else {
+                    assertEquals("rowIndex:["+rowIndex+"],columnIndex:["+columnIndex+"],realValue:["+
+                            realValue+"],expectedValue:["+expectedValue+"]",
+                            expectedValue,
+                            realValue
+                            );
+                }
+            }
+        }
+        assertTrue(!rs.next());
+    }
 }
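
Hoisting the helper into TestUtil lets AggregateIT, OrderByIT, and the new 4690 tests share one row-by-row assertion. A minimal usage sketch, assumed to sit inside a test that already holds a Connection conn; the table and expected values mirror the pattern in the AggregateIT hunk above:

    // Expected rows are given as {column1, column2, ...} per row, in order;
    // COUNT(v) comes back as Long, hence the 2L/1L literals.
    ResultSet rs = conn.createStatement().executeQuery(
        "SELECT pk2, pk1, COUNT(v) FROM T4690 GROUP BY pk2, pk1 ORDER BY pk2, pk1");
    TestUtil.assertResultSet(rs, new Object[][]{{2, 3, 2L}, {3, 2, 1L}});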


[4/5] phoenix git commit: PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete (Sergey Soldatov)

Posted by pb...@apache.org.
PHOENIX-4298 refactoring to avoid using deprecated API for Put/Delete (Sergey Soldatov)
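
The pattern throughout this commit is mechanical: the deprecated Put.add(family, qualifier, value) overloads are swapped for the equivalent Put.addColumn(...) overloads from the newer HBase client API. A minimal before/after sketch, with the row, family, and qualifier names assumed:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutMigrationSketch {
        public static Put makePut() {
            byte[] family = Bytes.toBytes("cf1");
            Put p = new Put(Bytes.toBytes("row"));
            // Before (deprecated):
            //   p.add(family, Bytes.toBytes("q1"), Bytes.toBytes("value1"));
            // After, as done throughout this commit:
            p.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("value1"));
            return p;
        }
    }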


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9948036b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9948036b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9948036b

Branch: refs/heads/4.x-cdh5.12
Commit: 9948036b674883576c666ec492cb3b46e526b1f0
Parents: 0743678
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Apr 19 12:49:20 2018 +0100
Committer: Pedro Boado <pb...@apache.org>
Committed: Thu Apr 19 21:58:30 2018 +0100

----------------------------------------------------------------------
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  2 +-
 .../end2end/ColumnProjectionOptimizationIT.java | 14 ++++----
 .../apache/phoenix/end2end/DynamicColumnIT.java | 12 +++----
 .../apache/phoenix/end2end/DynamicFamilyIT.java | 26 +++++++-------
 .../phoenix/end2end/MappingTableDataTypeIT.java |  4 +--
 .../phoenix/end2end/NativeHBaseTypesIT.java     | 30 ++++++++--------
 .../end2end/QueryDatabaseMetaDataIT.java        |  4 +--
 .../org/apache/phoenix/end2end/UpgradeIT.java   |  4 +--
 .../phoenix/tx/ParameterizedTransactionIT.java  |  4 +--
 .../coprocessor/MetaDataEndpointImpl.java       | 36 ++++++++++----------
 .../UngroupedAggregateRegionObserver.java       |  2 +-
 .../apache/phoenix/index/IndexMaintainer.java   | 12 +++----
 .../query/ConnectionQueryServicesImpl.java      |  2 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  4 +--
 .../phoenix/schema/stats/StatisticsWriter.java  |  9 +++--
 .../java/org/apache/phoenix/util/IndexUtil.java |  6 ++--
 .../wal/ReadWriteKeyValuesWithCodecTest.java    |  6 ++--
 .../index/covered/CoveredColumnIndexCodec.java  |  2 +-
 .../index/covered/LocalTableStateTest.java      | 10 +++---
 .../covered/TestCoveredColumnIndexCodec.java    |  6 ++--
 .../hbase/index/write/TestIndexWriter.java      | 10 ++----
 .../index/write/TestParalleIndexWriter.java     |  2 +-
 .../write/TestParalleWriterIndexCommitter.java  |  2 +-
 .../index/write/TestWALRecoveryCaching.java     |  4 +--
 .../recovery/TestPerRegionIndexWriteCache.java  |  4 +--
 .../java/org/apache/phoenix/util/TestUtil.java  |  4 +--
 26 files changed, 106 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 542e640..3ca35c2 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -210,7 +210,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     //make an attempted write to the primary that should also be indexed
     byte[] rowkey = Bytes.toBytes("indexed_row_key");
     Put p = new Put(rowkey);
-    p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
+    p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
     region.put(p);
 
     // we should then see the server go down

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
index e4ff66f..43dc302 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
@@ -242,19 +242,19 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT {
         try {
             htable = conn2.getQueryServices().getTable(htableName);
             Put put = new Put(PInteger.INSTANCE.toBytes(1));
-            put.add(cfB, c1, PInteger.INSTANCE.toBytes(1));
-            put.add(cfC, c2, PLong.INSTANCE.toBytes(2));
+            put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(1));
+            put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(2));
             htable.put(put);
 
             put = new Put(PInteger.INSTANCE.toBytes(2));
-            put.add(cfC, c2, PLong.INSTANCE.toBytes(10));
-            put.add(cfC, c3, PVarchar.INSTANCE.toBytes("abcd"));
+            put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(10));
+            put.addColumn(cfC, c3, PVarchar.INSTANCE.toBytes("abcd"));
             htable.put(put);
 
             put = new Put(PInteger.INSTANCE.toBytes(3));
-            put.add(cfB, c1, PInteger.INSTANCE.toBytes(3));
-            put.add(cfC, c2, PLong.INSTANCE.toBytes(10));
-            put.add(cfC, c3, PVarchar.INSTANCE.toBytes("abcd"));
+            put.addColumn(cfB, c1, PInteger.INSTANCE.toBytes(3));
+            put.addColumn(cfC, c2, PLong.INSTANCE.toBytes(10));
+            put.addColumn(cfC, c3, PVarchar.INSTANCE.toBytes("abcd"));
             htable.put(put);
 
             conn2.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
index f55d01a..04402cd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
@@ -88,12 +88,12 @@ public class DynamicColumnIT extends ParallelStatsDisabledIT {
                 byte[] key = Bytes.toBytes("entry1");
 
                 Put put = new Put(key);
-                put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
-                put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
-                put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
-                put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
-                put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
-                put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
+                put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
+                put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
+                put.addColumn(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
+                put.addColumn(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
+                put.addColumn(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
+                put.addColumn(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
                 mutations.add(put);
 
                 hTable.batch(mutations);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
index 866a8d2..acae6ee 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
@@ -115,25 +115,25 @@ public class DynamicFamilyIT extends ParallelStatsDisabledIT {
             Put put;
             List<Row> mutations = new ArrayList<Row>();
             put = new Put(Bytes.toBytes("entry1"));
-            put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
-            put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME));
+            put.addColumn(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
+            put.addColumn(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME));
             mutations.add(put);
             
             put = new Put(Bytes.toBytes("entry2"));
-            put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
-            put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME));
+            put.addColumn(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
+            put.addColumn(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME));
             mutations.add(put);
             
             put = new Put(Bytes.toBytes("entry3"));
-            put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
-            put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME));
-            put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
+            put.addColumn(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
+            put.addColumn(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME));
+            put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
             mutations.add(put);
 
             hTable.batch(mutations);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index 52e22bf..043907b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -107,8 +107,8 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
     private void insertData(final byte[] tableName, HBaseAdmin admin, HTableInterface t) throws IOException,
             InterruptedException {
         Put p = new Put(Bytes.toBytes("row"));
-        p.add(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
-        p.add(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), Bytes.toBytes("value2"));
+        p.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
+        p.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), Bytes.toBytes("value2"));
         t.put(p);
         t.flushCommits();
         admin.flush(tableName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
index 127c25a..50563d4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
@@ -96,8 +96,8 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
             
             bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
             put = new Put(key);
-            put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5000));
-            put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50000L));
+            put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5000));
+            put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50000L));
             mutations.add(put);
             // FIXME: the version of the Delete constructor without the lock args was introduced
             // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
@@ -106,30 +106,30 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
             Delete del = new Delete(key, ts);
             mutations.add(del);
             put = new Put(key);
-            put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(2000));
-            put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(20000L));
+            put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(2000));
+            put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(20000L));
             mutations.add(put);
             
             key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
             put = new Put(key);
-            put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5));
-            put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50L));
+            put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(5));
+            put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(50L));
             mutations.add(put);
             put = new Put(key);
-            put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10));
-            put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
+            put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10));
+            put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
             mutations.add(put);
             
             key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
             put = new Put(key);
-            put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(3000));
-            put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(30000L));
+            put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(3000));
+            put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(30000L));
             mutations.add(put);
             
             key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
             put = new Put(key);
-            put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(4000));
-            put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(40000L));
+            put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(4000));
+            put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(40000L));
             mutations.add(put);
             
             hTable.batch(mutations);
@@ -286,9 +286,9 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
         // negative number for an unsigned type
         key = ByteUtil.concat(Bytes.toBytes(-10), Bytes.toBytes(100L), Bytes.toBytes("e"));
         put = new Put(key);
-        put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10));
-        put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
-        put.add(family, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY);
+        put.addColumn(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10));
+        put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
+        put.addColumn(family, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY);
         mutations.add(put);
         hTable.batch(mutations);
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index 54cb5da..1386272 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -899,8 +899,8 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
                     pconn.getQueryServices()
                             .getTable(SchemaUtil.getTableNameAsBytes(schemaName, tableName));
             Put put = new Put(Bytes.toBytes("0"));
-            put.add(cfB, Bytes.toBytes("COL1"), PInteger.INSTANCE.toBytes(1));
-            put.add(cfC, Bytes.toBytes("COL2"), PLong.INSTANCE.toBytes(2));
+            put.addColumn(cfB, Bytes.toBytes("COL1"), PInteger.INSTANCE.toBytes(1));
+            put.addColumn(cfC, Bytes.toBytes("COL2"), PLong.INSTANCE.toBytes(2));
             htable.put(put);
 
             // Should be ok b/c we've marked the view with IMMUTABLE_ROWS=true

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index b71dd7c..48a49b2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -624,7 +624,7 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
                     schemaName == null ? new byte[0] : Bytes.toBytes(schemaName),
                     Bytes.toBytes(tableName));
         Put viewColumnDefinitionPut = new Put(rowKey, HConstants.LATEST_TIMESTAMP);
-        viewColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+        viewColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
             PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, HConstants.LATEST_TIMESTAMP, null);
 
         try (PhoenixConnection conn =
@@ -738,7 +738,7 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
                 byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
                 byte[] qualifier = UPGRADE_MUTEX;
                 Put put = new Put(row);
-                put.add(family, qualifier, UPGRADE_MUTEX_UNLOCKED);
+                put.addColumn(family, qualifier, UPGRADE_MUTEX_UNLOCKED);
                 sysMutexTable.put(put);
                 sysMutexTable.flushCommits();
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index 5421801..ce01e2b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -273,7 +273,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes( nonTxTableName));
         List<Put>puts = Lists.newArrayList(new Put(PInteger.INSTANCE.toBytes(1)), new Put(PInteger.INSTANCE.toBytes(2)), new Put(PInteger.INSTANCE.toBytes(3)));
         for (Put put : puts) {
-            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
+            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
         }
         htable.put(puts);
         
@@ -333,7 +333,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
         // Reset empty column value to an empty value like it is pre-transactions
         HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
         Put put = new Put(PInteger.INSTANCE.toBytes(1));
-        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
+        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
         htable.put(put);
         
         HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 34218d5..4c72c2d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2639,7 +2639,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             int newPosition = ++lastOrdinalPos;
                             byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
                             PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
-                            viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                            viewColumnPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                                     PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
                             mutationsForAddingColumnsToViews.add(viewColumnPut);
                         } else {
@@ -2674,7 +2674,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar);
                         byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
                         PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
-                        viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                        viewColumnPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                                 PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
                         addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view,
                                 deltaNumPkColsSoFar, columnName, viewColumnPut);
@@ -2713,7 +2713,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 }
                 byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
                 PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
-                viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
                 // invalidate the view so that it is removed from the cache
                 invalidateList.add(new ImmutableBytesPtr(viewKey));
@@ -2751,21 +2751,21 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             int oldBaseColumnCount = view.getBaseColumnCount();
             byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
             PInteger.INSTANCE.getCodec().encodeInt(oldBaseColumnCount + baseTableColumnDelta, baseColumnCountPtr, 0);
-            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr);
         }
         
         if (viewColumnDelta != 0) {
             byte[] columnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
             PInteger.INSTANCE.getCodec().encodeInt(numCols + viewColumnDelta, columnCountPtr, 0);
-            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, clientTimeStamp, columnCountPtr);
         }
         
         if (changeSequenceNumber) {
             byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
             PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
-            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
 
             mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
@@ -2781,7 +2781,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 Put positionUpdatePut = new Put(columnKey, clientTimeStamp);
                 byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
                 PInteger.INSTANCE.getCodec().encodeInt(ordinalPosition, ptr, 0);
-                positionUpdatePut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                positionUpdatePut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
                 mutationsForAddingColumnsToViews.add(positionUpdatePut);
                 i++;
@@ -2796,7 +2796,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         if (!basePhysicalTable.isTransactional() && switchAttribute(basePhysicalTable, basePhysicalTable.isTransactional(), tableMetadata, TRANSACTIONAL_BYTES)) {
         	invalidateList.add(new ImmutableBytesPtr(viewKey));
         	Put put = new Put(viewKey);
-            put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
             		TRANSACTIONAL_BYTES, clientTimeStamp, PBoolean.INSTANCE.toBytes(true));
             mutationsForAddingColumnsToViews.add(put);
         }
@@ -3034,14 +3034,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 long newSequenceNumber = index.getSequenceNumber() + 1;
                 byte[] newSequenceNumberPtr = new byte[PLong.INSTANCE.getByteSize()];
                 PLong.INSTANCE.getCodec().encodeLong(newSequenceNumber, newSequenceNumberPtr, 0);
-                indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                indexHeaderRowMutation.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, newSequenceNumberPtr);
                 
                 // increase the column count
                 int newColumnCount = index.getColumns().size() + deltaNumPkColsSoFar;
                 byte[] newColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
                 PInteger.INSTANCE.getCodec().encodeInt(newColumnCount, newColumnCountPtr, 0);
-                indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                indexHeaderRowMutation.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, newColumnCountPtr);
                 
                 // add index row header key to the invalidate list to force clients to fetch the latest meta-data
@@ -3068,7 +3068,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             int indexColumnDataType = IndexUtil.getIndexColumnDataType(true,
                     PDataType.fromTypeId(viewPkColumnDataType)).getSqlType();
             PInteger.INSTANCE.getCodec().encodeInt(indexColumnDataType, indexColumnDataTypeBytes, 0);
-            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.DATA_TYPE_BYTES, indexColumnDataTypeBytes);
             
              
@@ -3078,7 +3078,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES);
             if (decimalDigits != null && decimalDigits.size() > 0) {
                 Cell decimalDigit = decimalDigits.get(0);
-                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES, decimalDigit.getValueArray());
             }
             
@@ -3088,7 +3088,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES);
             if (columnSizes != null && columnSizes.size() > 0) {
                 Cell columnSize = columnSizes.get(0);
-                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES, columnSize.getValueArray());
             }
             
@@ -3097,7 +3097,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     PhoenixDatabaseMetaData.SORT_ORDER_BYTES);
             if (sortOrders != null && sortOrders.size() > 0) {
                 Cell sortOrder = sortOrders.get(0);
-                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.SORT_ORDER_BYTES, sortOrder.getValueArray());
             }
             
@@ -3106,7 +3106,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
             if (dataTableNames != null && dataTableNames.size() > 0) {
                 Cell dataTableName = dataTableNames.get(0);
-                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES, dataTableName.getValueArray());
             }
             
@@ -3114,12 +3114,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             byte[] ordinalPositionBytes = new byte[PInteger.INSTANCE.getByteSize()];
             int ordinalPositionOfNewCol = oldNumberOfColsInIndex + deltaNumPkColsSoFar;
             PInteger.INSTANCE.getCodec().encodeInt(ordinalPositionOfNewCol, ordinalPositionBytes, 0);
-            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, ordinalPositionBytes);
             
             // New PK columns have to be nullable after the first DDL
             byte[] isNullableBytes = PInteger.INSTANCE.toBytes(ResultSetMetaData.columnNullable);
-            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.NULLABLE_BYTES, isNullableBytes);
             
             // Set the key sequence for the pk column to be added
@@ -3127,7 +3127,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar);
             byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
             PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
-            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+            indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
             
             mutationsForAddingColumnsToViews.add(indexColumnDefinitionPut);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 5a26087..abdcf72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -767,7 +767,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                                 if (!timeStamps.contains(kvts)) {
                                     Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
                                         kv.getRowLength());
-                                    put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
+                                    put.addColumn(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
                                         ByteUtil.EMPTY_BYTE_ARRAY);
                                     mutations.add(put);
                                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 2f41dc3..bc2523d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -1154,15 +1154,15 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                 ColumnReference indexColumn = coveredColumnsMap.get(ref);
                 // If table delete was single version, then index delete should be as well
                 if (deleteType == DeleteType.SINGLE_VERSION) {
-                    delete.deleteFamilyVersion(indexColumn.getFamily(), ts);
+                    delete.addFamilyVersion(indexColumn.getFamily(), ts);
                 } else {
-                    delete.deleteFamily(indexColumn.getFamily(), ts);
+                    delete.addFamily(indexColumn.getFamily(), ts);
                 }
             }
             if (deleteType == DeleteType.SINGLE_VERSION) {
-                delete.deleteFamilyVersion(emptyCF, ts);
+                delete.addFamilyVersion(emptyCF, ts);
             } else {
-                delete.deleteFamily(emptyCF, ts);
+                delete.addFamily(emptyCF, ts);
             }
             delete.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
             return delete;
@@ -1181,9 +1181,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
                     ColumnReference indexColumn = coveredColumnsMap.get(ref);
                     // If point delete for data table, then use point delete for index as well
                     if (kv.getTypeByte() == KeyValue.Type.Delete.getCode()) { 
-                        delete.deleteColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
+                        delete.addColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
                     } else {
-                        delete.deleteColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
+                        delete.addColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
                     }
                 }
             }
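
The Delete API renames in this hunk follow the same deprecation, and the version semantics matter for index correctness, as the comments above note. A sketch of the mapping (standalone, HBase client API only):

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteMigrationSketch {
        static Delete newStyleDelete(long ts) {
            Delete d = new Delete(Bytes.toBytes("rowkey"));
            byte[] cf = Bytes.toBytes("cf");
            byte[] q = Bytes.toBytes("q");
            d.addColumn(cf, q, ts);      // was deleteColumn: the one version at exactly ts
            d.addColumns(cf, q, ts);     // was deleteColumns: all versions <= ts
            d.addFamily(cf, ts);         // was deleteFamily: whole family <= ts
            d.addFamilyVersion(cf, ts);  // was deleteFamilyVersion: family cells at exactly ts
            return d;
        }
    }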

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index aafcf12..92eca64 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2651,7 +2651,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
                         PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
                 Put put = new Put(mutexRowKey);
-                put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
+                put.addColumn(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
                 sysMutexTable.put(put);
             }
         } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 1a11427..082a58b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -1040,10 +1040,10 @@ public class PTableImpl implements PTable {
             newMutations();
             Delete delete = new Delete(key);
             if (families.isEmpty()) {
-                delete.deleteFamily(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), ts);
+                delete.addFamily(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), ts);
             } else {
                 for (PColumnFamily colFamily : families) {
-                    delete.deleteFamily(colFamily.getName().getBytes(), ts);
+                    delete.addFamily(colFamily.getName().getBytes(), ts);
                 }
             }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 8956862..ae077b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -61,7 +61,6 @@ import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PrefixByteDecoder;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.phoenix.util.TimeKeeper;
 
 import com.google.protobuf.ServiceException;
 
@@ -189,13 +188,13 @@ public class StatisticsWriter implements Closeable {
     private void addGuidepost(ImmutableBytesPtr cfKey, List<Mutation> mutations, ImmutableBytesWritable ptr, long byteCount, long rowCount, long timeStamp) {
         byte[] prefix = StatisticsUtil.getRowKey(tableName, cfKey, ptr);
         Put put = new Put(prefix);
-        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES,
+        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES,
                 timeStamp, PLong.INSTANCE.toBytes(byteCount));
-        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
+        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                 PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES, timeStamp,
                 PLong.INSTANCE.toBytes(rowCount));
         // Add our empty column value so queries behave correctly
-        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp,
+        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp,
                 ByteUtil.EMPTY_BYTE_ARRAY);
         mutations.add(put);
     }
@@ -241,7 +240,7 @@ public class StatisticsWriter implements Closeable {
         long currentTime = EnvironmentEdgeManager.currentTimeMillis();
         byte[] prefix = tableName;
         Put put = new Put(prefix);
-        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES,
+        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES,
                 timeStamp, PDate.INSTANCE.toBytes(new Date(currentTime)));
         return put;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 33b7383..7e280f4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -735,11 +735,11 @@ public class IndexUtil {
             HTableInterface metaTable, PIndexState newState) throws Throwable {
         // Mimic the Put that gets generated by the client on an update of the index state
         Put put = new Put(indexTableKey);
-        put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES,
+        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES,
                 newState.getSerializedBytes());
-        put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES,
+        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES,
                 PLong.INSTANCE.toBytes(minTimeStamp));
-        put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES,
+        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES,
                 PLong.INSTANCE.toBytes(0));
         final List<Mutation> tableMetadata = Collections.<Mutation> singletonList(put);
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
index 469dd21..8bb491d 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java
@@ -39,12 +39,10 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.hbase.index.IndexTestingUtils;
 import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
 /**
  * Simple test to read/write simple files via our custom {@link WALCellCodec} to ensure properly
@@ -93,14 +91,14 @@ public class ReadWriteKeyValuesWithCodecTest {
     // Build up a couple of edits
     List<WALEdit> edits = new ArrayList<WALEdit>();
     Put p = new Put(ROW);
-    p.add(FAMILY, null, Bytes.toBytes("v1"));
+    p.addColumn(FAMILY, null, Bytes.toBytes("v1"));
 
     WALEdit withPut = new WALEdit();
     addMutation(withPut, p, FAMILY);
     edits.add(withPut);
 
     Delete d = new Delete(ROW);
-    d.deleteColumn(FAMILY, null);
+    d.addColumn(FAMILY, null);
     WALEdit withDelete = new WALEdit();
     addMutation(withDelete, d, FAMILY);
     edits.add(withDelete);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
index a668c21..83d05f3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java
@@ -101,7 +101,7 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
         // add each of the corresponding families to the put
         int count = 0;
         for (ColumnEntry column : columns) {
-            indexInsert.add(INDEX_ROW_COLUMN_FAMILY,
+            indexInsert.addColumn(INDEX_ROW_COLUMN_FAMILY,
                     ArrayUtils.addAll(Bytes.toBytes(count++), toIndexQualifier(column.ref)), null);
         }
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
index 82f3c3c..c7e1769 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
@@ -73,7 +73,7 @@ public class LocalTableStateTest {
   @Test
   public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
     Put m = new Put(row);
-    m.add(fam, qual, ts, val);
+    m.addColumn(fam, qual, ts, val);
     // setup mocks
     Configuration conf = new Configuration(false);
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -132,7 +132,7 @@ public class LocalTableStateTest {
             
         };
     Put m = new Put(row);
-    m.add(fam, qual, ts, val);
+    m.addColumn(fam, qual, ts, val);
     // setup mocks
     Configuration conf = new Configuration(false);
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -169,7 +169,7 @@ public class LocalTableStateTest {
             
     };
     Put m = new Put(row);
-    m.add(fam, qual, ts, val);
+    m.addColumn(fam, qual, ts, val);
     // setup mocks
     Configuration conf = new Configuration(false);
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
@@ -201,7 +201,7 @@ public class LocalTableStateTest {
   @SuppressWarnings("unchecked")
   public void testCorrectRollback() throws Exception {
     Put m = new Put(row);
-    m.add(fam, qual, ts, val);
+    m.addColumn(fam, qual, ts, val);
     // setup mocks
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
 
@@ -269,7 +269,7 @@ public class LocalTableStateTest {
     });
     LocalHBaseState state = new LocalTable(env);
     Put pendingUpdate = new Put(row);
-    pendingUpdate.add(fam, qual, ts, val);
+    pendingUpdate.addColumn(fam, qual, ts, val);
     LocalTableState table = new LocalTableState(state, pendingUpdate);
 
     // do the lookup for the given column

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
index 5cc6ada..d63dd6b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java
@@ -200,7 +200,7 @@ public class TestCoveredColumnIndexCodec {
     Delete d = new Delete(PK, 2);
     // need to set the timestamp here, as would actually happen on the server, unlike what happens
     // with puts, where they get the constructor-specified timestamp for unspecified methods.
-    d.deleteFamily(FAMILY, 2);
+    d.addFamily(FAMILY, 2);
     // setup the next batch of 'current state', basically just ripping out the current state from
     // the last round
     table = new SimpleTableState(new Result(kvs));
@@ -221,12 +221,12 @@ public class TestCoveredColumnIndexCodec {
 
     // now with the delete of the columns
     d = new Delete(PK, 2);
-    d.deleteColumns(FAMILY, QUAL, 2);
+    d.addColumns(FAMILY, QUAL, 2);
     ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
 
     // this delete needs to match timestamps exactly, by contract, to have any effect
     d = new Delete(PK, 1);
-    d.deleteColumn(FAMILY, QUAL, 1);
+    d.addColumn(FAMILY, QUAL, 1);
     ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
   }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 918c411..a25f7cf 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -51,8 +50,6 @@ import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.phoenix.hbase.index.StubAbortable;
 import org.apache.phoenix.hbase.index.TableName;
 import org.apache.phoenix.hbase.index.exception.IndexWriteException;
-import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
-import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.junit.Rule;
 import org.junit.Test;
@@ -60,9 +57,6 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.collect.LinkedListMultimap;
-import com.google.common.collect.Multimap;
-
 public class TestIndexWriter {
   private static final Log LOG = LogFactory.getLog(TestIndexWriter.class);
   @Rule
@@ -115,7 +109,7 @@ public class TestIndexWriter {
 
     byte[] tableName = this.testName.getTableName();
     Put m = new Put(row);
-    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
+    m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
     Collection<Pair<Mutation, byte[]>> indexUpdates = Arrays.asList(new Pair<Mutation, byte[]>(m,
         tableName));
 
@@ -197,7 +191,7 @@ public class TestIndexWriter {
 
     // update a single table
     Put m = new Put(row);
-    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
+    m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
     final List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
     indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index bfe1d0d..cd29e10 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -101,7 +101,7 @@ public class TestParalleIndexWriter {
     Mockito.when(mockRegion.getTableDesc()).thenReturn(mockTableDesc);
     ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
     Put m = new Put(row);
-    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
+    m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
     Multimap<HTableInterfaceReference, Mutation> indexUpdates =
         ArrayListMultimap.<HTableInterfaceReference, Mutation> create();
     indexUpdates.put(new HTableInterfaceReference(tableName), m);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 6f0881b..32ae108 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -102,7 +102,7 @@ public class TestParalleWriterIndexCommitter {
 
     ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
     Put m = new Put(row);
-    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
+    m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
     Multimap<HTableInterfaceReference, Mutation> indexUpdates =
         ArrayListMultimap.<HTableInterfaceReference, Mutation> create();
     indexUpdates.put(new HTableInterfaceReference(tableName), m);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 017470a..faee74a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -191,7 +191,7 @@ public class TestWALRecoveryCaching {
 
     // load some data into the table
     Put p = new Put(Bytes.toBytes("row"));
-    p.add(family, qual, Bytes.toBytes("value"));
+    p.addColumn(family, qual, Bytes.toBytes("value"));
     HTable primary = new HTable(conf, testTable.getTableName());
     primary.put(p);
     primary.flushCommits();
@@ -234,7 +234,7 @@ public class TestWALRecoveryCaching {
    // make a second put that (1) isn't indexed, so we can be sure of the index state, and (2)
    // ensures that our table is back up
     Put p2 = new Put(p.getRow());
-    p2.add(nonIndexedFamily, Bytes.toBytes("Not indexed"), Bytes.toBytes("non-indexed value"));
+    p2.addColumn(nonIndexedFamily, Bytes.toBytes("Not indexed"), Bytes.toBytes("non-indexed value"));
     primary.put(p2);
     primary.flushCommits();
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index 35b607e..d7adacc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -61,8 +61,8 @@ public class TestPerRegionIndexWriteCache {
   Put p = new Put(row);
   Put p2 = new Put(Bytes.toBytes("other row"));
   {
-    p.add(family, qual, val);
-    p2.add(family, qual, val);
+    p.addColumn(family, qual, val);
+    p2.addColumn(family, qual, val);
   }
 
   HRegion r1; // FIXME: Uses private type

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9948036b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 277e257..341fbec 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -786,10 +786,10 @@ public class TestUtil {
             byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
            
             Put put = new Put(markerRowKey);
-            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
             htable.put(put);
             Delete delete = new Delete(markerRowKey);
-            delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+            delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
             htable.delete(delete);
             htable.close();
             if (table.isTransactional()) {


[3/5] phoenix git commit: PHOENIX-4600 Add retry logic for partial index rebuilder writes

Posted by pb...@apache.org.
PHOENIX-4600 Add retry logic for partial index rebuilder writes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/07436780
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/07436780
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/07436780

Branch: refs/heads/4.x-cdh5.12
Commit: 074367804e5d3409988b237554161b722e5b8b35
Parents: 803abe7
Author: Vincent Poon <vi...@apache.org>
Authored: Thu Apr 19 18:30:14 2018 +0100
Committer: Pedro Boado <pb...@apache.org>
Committed: Thu Apr 19 21:58:11 2018 +0100

----------------------------------------------------------------------
 .../end2end/index/MutableIndexRebuilderIT.java  | 143 +++++++++++++++++++
 .../UngroupedAggregateRegionObserver.java       |  32 +++--
 2 files changed, 160 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/07436780/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
new file mode 100644
index 0000000..8420f16
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
@@ -0,0 +1,143 @@
+package org.apache.phoenix.end2end.index;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.RunUntilFailure;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import com.google.common.collect.Maps;
+
+@RunWith(RunUntilFailure.class)
+public class MutableIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
+    private static final int WAIT_AFTER_DISABLED = 0;
+    private static final long REBUILD_PERIOD = 50000;
+    private static final long REBUILD_INTERVAL = 2000;
+    private static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
+
+    /**
+     * Tests that the index rebuilder retries for exactly the configured # of retries
+     * @throws Exception
+     */
+    @Test
+    public void testRebuildRetriesSuccessful() throws Throwable {
+        int numberOfRetries = 5;
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
+        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, Boolean.TRUE.toString());
+        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, Long.toString(REBUILD_INTERVAL));
+        serverProps.put(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, "50000000");
+        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, Long.toString(REBUILD_PERIOD)); // batch at 50 seconds
+        serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(WAIT_AFTER_DISABLED));
+        serverProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numberOfRetries + "");
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
+        indexRebuildTaskRegionEnvironment =
+                (RegionCoprocessorEnvironment) getUtility()
+                .getRSForFirstRegionInTable(
+                    PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+                .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+                .get(0).getCoprocessorHost()
+                .findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+        MetaDataRegionObserver.initRebuildIndexConnectionProps(
+            indexRebuildTaskRegionEnvironment.getConfiguration());
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            String schemaName = generateUniqueName();
+            String tableName = generateUniqueName();
+            String indexName = generateUniqueName();
+            final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+            final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+            conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k VARCHAR PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR) DISABLE_INDEX_ON_WRITE_FAILURE = TRUE");
+            conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1, v2)");
+            HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+            IndexUtil.updateIndexState(fullIndexName, EnvironmentEdgeManager.currentTimeMillis(), metaTable, PIndexState.DISABLE);
+            conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
+            conn.commit();
+            // Simulate write failure when rebuilder runs
+            TestUtil.addCoprocessor(conn, fullIndexName, WriteFailingRegionObserver.class);
+            waitForIndexState(conn, fullTableName, fullIndexName, PIndexState.INACTIVE);
+            // rebuild writes should retry for exactly the configured number of times
+            ExecutorService executor = Executors.newSingleThreadExecutor();
+            try {
+                Future<Boolean> future = executor.submit(new Callable<Boolean>() {
+                    @Override
+                    public Boolean call() throws Exception {
+                        runIndexRebuilder(fullTableName);
+                        return true;
+                    }});
+                assertTrue(future.get(120, TimeUnit.SECONDS));
+                assertEquals(numberOfRetries, WriteFailingRegionObserver.attempts.get());
+            } finally {
+                executor.shutdownNow();
+            }
+        }
+    }
+
+    public static void waitForIndexState(Connection conn, String fullTableName, String fullIndexName, PIndexState expectedIndexState) throws InterruptedException, SQLException {
+        int nRetries = 2;
+        PIndexState actualIndexState = null;
+        do {
+            runIndexRebuilder(fullTableName);
+            if ((actualIndexState = TestUtil.getIndexState(conn, fullIndexName)) == expectedIndexState) {
+                return;
+            }
+            Thread.sleep(1000);
+        } while (--nRetries > 0);
+        fail("Expected index state of " + expectedIndexState + ", but was " + actualIndexState);
+    }
+
+    private static void runIndexRebuilder(String table) throws InterruptedException, SQLException {
+        runIndexRebuilder(Collections.<String>singletonList(table));
+    }
+
+    private static void runIndexRebuilder(List<String> tables) throws InterruptedException, SQLException {
+        BuildIndexScheduleTask task =
+                new MetaDataRegionObserver.BuildIndexScheduleTask(
+                        indexRebuildTaskRegionEnvironment, tables);
+        task.run();
+    }
+
+    public static class WriteFailingRegionObserver extends SimpleRegionObserver {
+        public static volatile AtomicInteger attempts = new AtomicInteger(0);
+        @Override
+        public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+            attempts.incrementAndGet();
+            throw new DoNotRetryIOException("Simulating write failure on " + c.getEnvironment().getRegionInfo().getTable().getNameAsString());
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/07436780/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 31b512a..5a26087 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -243,6 +243,19 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         indexWriteProps = new ReadOnlyProps(indexWriteConfig.iterator());
     }
 
+    private void commitBatchWithRetries(final Region region, final List<Mutation> localRegionMutations, final long blockingMemstoreSize) throws IOException {
+        try {
+            commitBatch(region, localRegionMutations, blockingMemstoreSize);
+        } catch (IOException e) {
+            handleIndexWriteException(localRegionMutations, e, new MutateCommand() {
+                @Override
+                public void doMutation() throws IOException {
+                    commitBatch(region, localRegionMutations, blockingMemstoreSize);
+                }
+            });
+        }
+    }
+
     private void commitBatch(Region region, List<Mutation> mutations, long blockingMemstoreSize) throws IOException {
       if (mutations.isEmpty()) {
           return;
@@ -251,7 +264,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         Mutation[] mutationArray = new Mutation[mutations.size()];
      // When the memstore size reaches blockingMemstoreSize, we wait up to 3 seconds for a
      // flush to happen, which decreases the memstore size and then allows writes on the region.
-      for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 30; i++) {
+      for (int i = 0; blockingMemstoreSize > 0 && region.getMemstoreSize() > blockingMemstoreSize && i < 30; i++) {
           try {
               checkForRegionClosing();
               Thread.sleep(100);
@@ -892,16 +905,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         setIndexAndTransactionProperties(mutations, indexUUID, indexMaintainersPtr, txState, clientVersionBytes, useIndexProto);
         separateLocalAndRemoteMutations(targetHTable, region, mutations, localRegionMutations, remoteRegionMutations,
             isPKChanging);
-        try {
-            commitBatch(region, localRegionMutations, blockingMemStoreSize);
-        } catch (IOException e) {
-            handleIndexWriteException(localRegionMutations, e, new MutateCommand() {
-                @Override
-                public void doMutation() throws IOException {
-                    commitBatch(region, localRegionMutations, blockingMemStoreSize);
-                }
-            });
-        }
+        commitBatchWithRetries(region, localRegionMutations, blockingMemStoreSize);
         try {
             commitBatchWithHTable(targetHTable, remoteRegionMutations);
         } catch (IOException e) {
@@ -1069,8 +1073,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                             }
                         }
                         if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
-                            region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE,
-                                    HConstants.NO_NONCE);
+                            commitBatchWithRetries(region, mutations, -1);
                             uuidValue = ServerCacheClient.generateId();
                             mutations.clear();
                         }
@@ -1079,8 +1082,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                     
                 } while (hasMore);
                 if (!mutations.isEmpty()) {
-                    region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE,
-                            HConstants.NO_NONCE);
+                    commitBatchWithRetries(region, mutations, -1);
                 }
             }
         } catch (IOException e) {

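Taken together with the test above: rebuild writes that previously went straight to region.batchMutate(...) now flow through commitBatchWithRetries(...), so a failed write is replayed via handleIndexWriteException up to the standard HBase client retry count (HConstants.HBASE_CLIENT_RETRIES_NUMBER), which is exactly what testRebuildRetriesSuccessful asserts. A condensed sketch of the shape this leaves behind (Region, Mutation and MutateCommand are the Phoenix/HBase types shown in the diff):

    private void commitBatchWithRetries(final Region region,
            final List<Mutation> mutations, final long blockingMemstoreSize)
            throws IOException {
        try {
            commitBatch(region, mutations, blockingMemstoreSize);
        } catch (IOException e) {
            // handleIndexWriteException replays doMutation() until it succeeds
            // or the configured client retries are exhausted.
            handleIndexWriteException(mutations, e, new MutateCommand() {
                @Override
                public void doMutation() throws IOException {
                    commitBatch(region, mutations, blockingMemstoreSize);
                }
            });
        }
    }

Note also that the rebuild callers pass -1 for blockingMemstoreSize, and commitBatch now guards its backpressure loop with blockingMemstoreSize > 0, so rebuild batches skip the memstore wait entirely.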

[2/5] phoenix git commit: Keep Ring buffer size small by default and avoid starting disruptor for server connection

Posted by pb...@apache.org.
Keep Ring buffer size small by default and avoid starting disruptor for server connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/803abe74
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/803abe74
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/803abe74

Branch: refs/heads/4.x-cdh5.12
Commit: 803abe749601cbfb222cfc45c04c2e214d70de3c
Parents: 3939c8d
Author: Ankit Singhal <an...@gmail.com>
Authored: Tue Apr 17 10:39:29 2018 +0100
Committer: Pedro Boado <pb...@apache.org>
Committed: Thu Apr 19 21:57:59 2018 +0100

----------------------------------------------------------------------
 .../org/apache/phoenix/log/QueryLoggerDisruptor.java |  2 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 15 +++++++++------
 .../main/java/org/apache/phoenix/util/QueryUtil.java | 14 ++++++++++++--
 3 files changed, 22 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/803abe74/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index b548d6c..1f2240e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -43,7 +43,7 @@ public class QueryLoggerDisruptor implements Closeable{
     private volatile Disruptor<RingBufferEvent> disruptor;
     private boolean isClosed = false;
     //number of elements to create within the ring buffer.
-    private static final int RING_BUFFER_SIZE = 256 * 1024;
+    private static final int RING_BUFFER_SIZE = 8 * 1024;
     private static final Log LOG = LogFactory.getLog(QueryLoggerDisruptor.class);
     private static final String DEFAULT_WAIT_STRATEGY = BlockingWaitStrategy.class.getName();
     

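For context on the new default: LMAX Disruptor ring buffer slots are pre-allocated up front and the size must be a power of two, so shrinking from 256 * 1024 to 8 * 1024 entries directly cuts the fixed per-disruptor footprint. A standalone sketch of how a buffer of this size is wired up (the event type here is a hypothetical stand-in for Phoenix's RingBufferEvent):

    import java.util.concurrent.Executors;
    import com.lmax.disruptor.BlockingWaitStrategy;
    import com.lmax.disruptor.EventHandler;
    import com.lmax.disruptor.dsl.Disruptor;
    import com.lmax.disruptor.dsl.ProducerType;

    public class RingBufferSketch {
        static class LogEvent { volatile String message; }  // hypothetical event type

        public static void main(String[] args) {
            int ringBufferSize = 8 * 1024;  // must be a power of two
            Disruptor<LogEvent> disruptor = new Disruptor<>(
                    LogEvent::new,                      // slots pre-allocated at this size
                    ringBufferSize,
                    Executors.defaultThreadFactory(),
                    ProducerType.MULTI,                 // many connections may publish
                    new BlockingWaitStrategy());        // the default strategy named above
            EventHandler<LogEvent> handler = (event, seq, endOfBatch) -> { /* consume */ };
            disruptor.handleEventsWith(handler);
            disruptor.start();
            disruptor.shutdown();
        }
    }
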
http://git-wip-us.apache.org/repos/asf/phoenix/blob/803abe74/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 02fcf24..aafcf12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -195,7 +195,6 @@ import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.EmptySequenceCacheException;
 import org.apache.phoenix.schema.FunctionNotFoundException;
 import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.schema.MetaDataSplitPolicy;
 import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
 import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
 import org.apache.phoenix.schema.PColumn;
@@ -243,6 +242,7 @@ import org.apache.phoenix.util.PhoenixContextExecutor;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PhoenixStopWatch;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -406,11 +406,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         this.maxConnectionsAllowed = config.getInt(QueryServices.CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS,
             QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS);
         this.shouldThrottleNumConnections = (maxConnectionsAllowed > 0);
-        try {
-            this.queryDisruptor = new QueryLoggerDisruptor(this.config);
-        } catch (SQLException e) {
-            logger.warn("Unable to initiate qeuery logging service !!");
-            e.printStackTrace();
+        if (!QueryUtil.isServerConnection(props)) {
+            // Start the queryDisruptor every time, since the log level can be changed per connection, but we can avoid starting it for server connections.
+            try {
+                this.queryDisruptor = new QueryLoggerDisruptor(this.config);
+            } catch (SQLException e) {
+                logger.warn("Unable to initiate query logging service!");
+                e.printStackTrace();
+            }
         }
 
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/803abe74/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index d7154a1..9d2e53c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -82,6 +82,7 @@ public final class QueryUtil {
      */
     public static final int DATA_TYPE_NAME_POSITION = 6;
 
+    public static final String IS_SERVER_CONNECTION = "IS_SERVER_CONNECTION";
     private static final String SELECT = "SELECT";
     private static final String FROM = "FROM";
     private static final String WHERE = "WHERE";
@@ -358,6 +359,15 @@ public final class QueryUtil {
             SQLException {
         return getConnectionOnServer(new Properties(), conf);
     }
+    
+    public static void setServerConnection(Properties props){
+        UpgradeUtil.doNotUpgradeOnFirstConnection(props);
+        props.setProperty(IS_SERVER_CONNECTION, Boolean.TRUE.toString());
+    }
+    
+    public static boolean isServerConnection(ReadOnlyProps props) {
+        return props.getBoolean(IS_SERVER_CONNECTION, false);
+    }
 
     /**
      * @return {@link PhoenixConnection} with {@value UpgradeUtil#DO_NOT_UPGRADE} set so that we don't initiate metadata upgrade.
@@ -365,13 +375,13 @@ public final class QueryUtil {
     public static Connection getConnectionOnServer(Properties props, Configuration conf)
             throws ClassNotFoundException,
             SQLException {
-        UpgradeUtil.doNotUpgradeOnFirstConnection(props);
+        setServerConnection(props);
         return getConnection(props, conf);
     }
 
     public static Connection getConnectionOnServerWithCustomUrl(Properties props, String principal)
             throws SQLException, ClassNotFoundException {
-        UpgradeUtil.doNotUpgradeOnFirstConnection(props);
+        setServerConnection(props);
         String url = getConnectionUrl(props, null, principal);
         LOG.info("Creating connection with the jdbc url: " + url);
         return DriverManager.getConnection(url, props);

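The net effect of the QueryUtil changes: any code path that opens a connection via getConnectionOnServer() now tags its Properties with IS_SERVER_CONNECTION, and ConnectionQueryServicesImpl checks that flag before creating the QueryLoggerDisruptor. A minimal sketch of the server-side flow (assuming an HBase Configuration is available; the class name is illustrative):

    import java.sql.Connection;
    import java.util.Properties;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.util.QueryUtil;

    public class ServerConnectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            Properties props = new Properties();

            // getConnectionOnServer() calls setServerConnection(props), which
            // sets IS_SERVER_CONNECTION=true (and the do-not-upgrade flag)
            // before the connection is opened.
            try (Connection conn = QueryUtil.getConnectionOnServer(props, conf)) {
                // ConnectionQueryServicesImpl sees isServerConnection(...) == true
                // for these props and skips starting the QueryLoggerDisruptor.
            }
        }
    }
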

[5/5] phoenix git commit: Changes for CDH 5.12.x

Posted by pb...@apache.org.
Changes for CDH 5.12.x


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3fc3c5f3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3fc3c5f3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3fc3c5f3

Branch: refs/heads/4.x-cdh5.12
Commit: 3fc3c5f3f955ea52df34de8de211eaa3ce7d9b15
Parents: 9948036
Author: Pedro Boado <pb...@apache.org>
Authored: Sat Mar 10 17:54:04 2018 +0000
Committer: Pedro Boado <pb...@apache.org>
Committed: Thu Apr 19 22:00:40 2018 +0100

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                        |  2 +-
 phoenix-client/pom.xml                          |  2 +-
 phoenix-core/pom.xml                            |  2 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 34 ++++++++++++++++++--
 phoenix-flume/pom.xml                           |  2 +-
 phoenix-hive/pom.xml                            |  2 +-
 phoenix-kafka/pom.xml                           |  2 +-
 phoenix-load-balancer/pom.xml                   |  2 +-
 phoenix-parcel/pom.xml                          |  2 +-
 phoenix-pherf/pom.xml                           |  2 +-
 phoenix-pig/pom.xml                             |  2 +-
 phoenix-queryserver-client/pom.xml              |  2 +-
 phoenix-queryserver/pom.xml                     |  2 +-
 phoenix-server/pom.xml                          |  2 +-
 phoenix-spark/pom.xml                           |  2 +-
 phoenix-tracing-webapp/pom.xml                  |  2 +-
 pom.xml                                         |  4 +--
 17 files changed, 49 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 55a9a6e..14225ee 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 2454de6..e211008 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index e1f8e2a..2d837a2 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index 4fdddf5..d1f05f8 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -124,6 +124,36 @@ public class PhoenixRpcScheduler extends RpcScheduler {
     public void setMetadataExecutorForTesting(RpcExecutor executor) {
         this.metadataCallExecutor = executor;
     }
-    
-    
+
+    @Override
+    public int getReadQueueLength() {
+        return delegate.getReadQueueLength();
+    }
+
+    @Override
+    public int getWriteQueueLength() {
+        return delegate.getWriteQueueLength();
+    }
+
+    @Override
+    public int getScanQueueLength() {
+        return delegate.getScanQueueLength();
+    }
+
+    @Override
+    public int getActiveReadRpcHandlerCount() {
+        return delegate.getActiveReadRpcHandlerCount();
+    }
+
+    @Override
+    public int getActiveWriteRpcHandlerCount() {
+        return delegate.getActiveWriteRpcHandlerCount();
+    }
+
+    @Override
+    public int getActiveScanRpcHandlerCount() {
+        return delegate.getActiveScanRpcHandlerCount();
+    }
+
+
 }
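
The new methods above exist presumably because the RpcScheduler base class in CDH 5.12's HBase declares these queue and handler metric accessors as abstract (the @Override annotations imply as much). Since PhoenixRpcScheduler wraps another scheduler and keeps no queues of its own, each override simply forwards to the delegate. A minimal sketch of that wrapping pattern (Scheduler/WrappingScheduler are hypothetical names, not HBase classes):

    // Base class that grew a new abstract accessor in a newer release.
    abstract class Scheduler {
        abstract int getReadQueueLength();
    }

    // A wrapper stays source-compatible by delegating each new accessor.
    class WrappingScheduler extends Scheduler {
        private final Scheduler delegate;

        WrappingScheduler(Scheduler delegate) {
            this.delegate = delegate;
        }

        @Override
        int getReadQueueLength() {
            // No queues of its own: report the delegate's state unchanged.
            return delegate.getReadQueueLength();
        }
    }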

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index d61a9aa..8a78010 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-hive/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 809fbea..804ba5f 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index af6b4fe..08a8bfd 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+		<version>4.14.0-cdh5.12.2-SNAPSHOT</version>
 	</parent>
 	<artifactId>phoenix-kafka</artifactId>
 	<name>Phoenix - Kafka</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-load-balancer/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 81e124a..cdb4c1b 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-load-balancer</artifactId>
   <name>Phoenix Load Balancer</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-parcel/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-parcel/pom.xml b/phoenix-parcel/pom.xml
index 5e6fccc..8e7b096 100644
--- a/phoenix-parcel/pom.xml
+++ b/phoenix-parcel/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-parcel</artifactId>
   <name>Phoenix Parcels for CDH</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 7831f35..0634a01 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+		<version>4.14.0-cdh5.12.2-SNAPSHOT</version>
 	</parent>
 
 	<artifactId>phoenix-pherf</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index e5d0d52..cd4f6bc 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-queryserver-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index 83cfde6..86c56b9 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-queryserver/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 7180b18..e6c32cb 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 01a7bc3..d33bdaa 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index a45d4b5..b3b6e1c 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -28,7 +28,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+    <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   </parent>
   <artifactId>phoenix-spark</artifactId>
   <name>Phoenix - Spark</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/phoenix-tracing-webapp/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-tracing-webapp/pom.xml b/phoenix-tracing-webapp/pom.xml
index 93edc43..5226f84 100755
--- a/phoenix-tracing-webapp/pom.xml
+++ b/phoenix-tracing-webapp/pom.xml
@@ -27,7 +27,7 @@
     <parent>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix</artifactId>
-      <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+      <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
     </parent>
 
     <artifactId>phoenix-tracing-webapp</artifactId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fc3c5f3/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 970428a..947ed59 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.phoenix</groupId>
   <artifactId>phoenix</artifactId>
-  <version>4.14.0-cdh5.11.2-SNAPSHOT</version>
+  <version>4.14.0-cdh5.12.2-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Phoenix</name>
   <description>A SQL layer over HBase</description>
@@ -86,7 +86,7 @@
   <parent>
     <groupId>com.cloudera.cdh</groupId>
     <artifactId>cdh-root</artifactId>
-    <version>5.11.2</version>
+    <version>5.12.2</version>
   </parent>
 
   <scm>