Posted to commits@phoenix.apache.org by ma...@apache.org on 2015/04/16 16:31:42 UTC

[01/50] [abbrv] phoenix git commit: PHOENIX-1642 Make Phoenix Master Branch pointing to HBase1.0.0 - ADDENDUM for HBASE-13109

Repository: phoenix
Updated Branches:
  refs/heads/calcite 2368ea6d3 -> 9309fff7e


PHOENIX-1642 Make Phoenix Master Branch pointing to HBase1.0.0 - ADDENDUM for HBASE-13109


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ad2ad0ce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ad2ad0ce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ad2ad0ce

Branch: refs/heads/calcite
Commit: ad2ad0cefd5d19a9bc84345444455a9ecbb55c78
Parents: d70f389
Author: Enis Soztutar <en...@apache.org>
Authored: Wed Mar 25 18:08:35 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Wed Mar 25 18:08:35 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/IndexHalfStoreFileReader.java      | 2 +-
 .../phoenix/hbase/index/scanner/FilteredKeyValueScanner.java     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ad2ad0ce/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 654daf0..49e2022 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -387,7 +387,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
             // Added for compatibility with HBASE-13109
             // Once we drop support for older versions, add an @override annotation here
             // and figure out how to get the next indexed key
-            public byte[] getNextIndexedKey() {
+            public Cell getNextIndexedKey() {
                 return null; // indicate that we cannot use the optimization
             }
         };

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ad2ad0ce/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index aae13fb..e225696 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -162,7 +162,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     // Added for compatibility with HBASE-13109
     // Once we drop support for older versions, add an @override annotation here
     // and figure out how to get the next indexed key
-    public byte[] getNextIndexedKey() {
+    public Cell getNextIndexedKey() {
         return null; // indicate that we cannot use the optimization
     }
-}
\ No newline at end of file
+}
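
For context: HBASE-13109 added getNextIndexedKey() to the scanner interface with a Cell return type, so both readers above must match that signature to compile against HBase 1.0.0. Leaving off @Override is deliberate, as the in-line comments note: on older HBase versions the method does not exist in the interface, and the annotation would fail compilation there. A minimal sketch of this cross-version shim pattern, using an illustrative class rather than Phoenix's actual scanners:

    import org.apache.hadoop.hbase.Cell;

    public class ShimScanner {
        // No @Override on purpose: older HBase interfaces lack this method,
        // so the annotation would break the build against them. Matching the
        // post-HBASE-13109 Cell return type keeps HBase 1.x satisfied.
        public Cell getNextIndexedKey() {
            return null; // null tells HBase the seek-ahead optimization is unavailable
        }
    }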


[41/50] [abbrv] phoenix git commit: PHOENIX-1705 implement ARRAY_APPEND built in function (Dumindu Buddhika)

Posted by ma...@apache.org.
PHOENIX-1705 implement ARRAY_APPEND built in function (Dumindu Buddhika)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7ef17181
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7ef17181
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7ef17181

Branch: refs/heads/calcite
Commit: 7ef1718127ffaa99adbc4c25ec3715d9472464f9
Parents: 986080f
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Apr 14 12:07:29 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Apr 14 12:07:29 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ArrayAppendFunctionIT.java  | 667 +++++++++++++++++++
 .../function/ArrayAppendFunction.java           | 127 ++++
 .../expression/ArrayAppendFunctionTest.java     | 345 ++++++++++
 3 files changed, 1139 insertions(+)
----------------------------------------------------------------------
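
The integration test below drives the new built-in end to end through SQL. As a quick orientation, ARRAY_APPEND(array, element) returns a copy of the array with the element added at the end; per the tests, appending NULL leaves the array unchanged, and a NULL array yields NULL. A minimal standalone JDBC sketch, where the connection URL "jdbc:phoenix:localhost:2181" and table "demo" are illustrative assumptions:

    import java.sql.Array;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ArrayAppendExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS demo (id VARCHAR PRIMARY KEY, vals INTEGER[])");
            conn.createStatement().execute("UPSERT INTO demo VALUES ('row1', ARRAY[1,2,3])");
            conn.commit();

            // Append a matching-type element; mismatched types raise TypeMismatchException
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT ARRAY_APPEND(vals, 4) FROM demo WHERE id = 'row1'");
            while (rs.next()) {
                Array appended = rs.getArray(1); // [1, 2, 3, 4]
                System.out.println(appended);
            }
            conn.close();
        }
    }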


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ef17181/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
new file mode 100644
index 0000000..1957b3a
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
@@ -0,0 +1,667 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.*;
+
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.junit.Test;
+
+public class ArrayAppendFunctionIT extends BaseHBaseManagedTimeIT {
+    private void initTables(Connection conn) throws Exception {
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[],integers INTEGER[],doubles DOUBLE[],bigints BIGINT[],chars CHAR(15)[],double1 DOUBLE,char1 CHAR(17),nullcheck INTEGER,chars2 CHAR(15)[])";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO regions(region_name,varchars,integers,doubles,bigints,chars,double1,char1,nullcheck,chars2) VALUES('SF Bay Area'," +
+                "ARRAY['2345','46345','23234']," +
+                "ARRAY[2345,46345,23234,456]," +
+                "ARRAY[23.45,46.345,23.234,45.6,5.78]," +
+                "ARRAY[12,34,56,78,910]," +
+                "ARRAY['a','bbbb','c','ddd','e']," +
+                "23.45," +
+                "'wert'," +
+                "NULL," +
+                "ARRAY['a','bbbb','c','ddd','e','foo']" +
+                ")";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.execute();
+        conn.commit();
+    }
+
+    private void initTablesDesc(Connection conn, String type, String val) throws Exception {
+        String ddl = "CREATE TABLE regions (pk " + type + " PRIMARY KEY DESC,varchars VARCHAR[],integers INTEGER[],doubles DOUBLE[],bigints BIGINT[],chars CHAR(15)[],chars2 CHAR(15)[], bools BOOLEAN[])";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO regions(pk,varchars,integers,doubles,bigints,chars,chars2,bools) VALUES(" + val + "," +
+                "ARRAY['2345','46345','23234']," +
+                "ARRAY[2345,46345,23234,456]," +
+                "ARRAY[23.45,46.345,23.234,45.6,5.78]," +
+                "ARRAY[12,34,56,78,910]," +
+                "ARRAY['a','bbbb','c','ddd','e']," +
+                "ARRAY['a','bbbb','c','ddd','e','foo']," +
+                "ARRAY[true,false]" +
+                ")";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.execute();
+        conn.commit();
+    }
+
+    @Test
+    public void testArrayAppendFunctionVarchar() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(varchars,'34567') FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"2345", "46345", "23234", "34567"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInteger() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(integers,1234) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2345, 46345, 23234, 456, 1234};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionDouble() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(doubles,double1) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, 23.45};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionDouble2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(doubles,23) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, new Double(23)};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionBigint() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(bigints,1112) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Long[] longs = new Long[]{12l, 34l, 56l, 78l, 910l, 1112l};
+
+        Array array = conn.createArrayOf("BIGINT", longs);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionChar() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(chars,'fac') FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"a", "bbbb", "c", "ddd", "e", "fac"};
+
+        Array array = conn.createArrayOf("CHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test(expected = TypeMismatchException.class)
+    public void testArrayAppendFunctionIntToCharArray() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(varchars,234) FROM regions WHERE region_name = 'SF Bay Area'");
+    }
+
+    @Test(expected = TypeMismatchException.class)
+    public void testArrayAppendFunctionVarcharToIntegerArray() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(integers,'234') FROM regions WHERE region_name = 'SF Bay Area'");
+
+    }
+
+    @Test(expected = SQLException.class)
+    public void testArrayAppendFunctionChar2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(chars,'facfacfacfacfacfacfac') FROM regions WHERE region_name = 'SF Bay Area'");
+        rs.next();
+        rs.getArray(1);
+    }
+
+    @Test
+    public void testArrayAppendFunctionIntegerToDoubleArray() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(doubles,45) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, 45.0};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithNestedFunctions1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(ARRAY[23,45],integers[1]) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{23, 45, 2345};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithNestedFunctions2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(integers,ARRAY_ELEM(ARRAY[2,4],1)) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2345, 46345, 23234, 456, 2};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithNestedFunctions3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(doubles,ARRAY_ELEM(doubles,2)) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, 46.345};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithUpsert1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO regions(region_name,varchars) VALUES('SF Bay Area',ARRAY_APPEND(ARRAY['hello','world'],':-)'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT varchars FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"hello", "world", ":-)"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithUpsert2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,integers INTEGER[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO regions(region_name,integers) VALUES('SF Bay Area',ARRAY_APPEND(ARRAY[4,5],6))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT integers FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{4, 5, 6};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithUpsert3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO regions(region_name,doubles) VALUES('SF Bay Area',ARRAY_APPEND(ARRAY[5.67,7.87],9.0))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT doubles FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{5.67, 7.87, new Double(9)};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithUpsertSelect1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+        conn.createStatement().execute(ddl);
+
+        ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area',ARRAY_APPEND(ARRAY[5.67,7.87],9.0))";
+        conn.createStatement().execute(dml);
+
+        dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area2',ARRAY_APPEND(ARRAY[56.7,7.87],9.2))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        dml = "UPSERT INTO target(region_name, doubles) SELECT region_name, ARRAY_APPEND(doubles,5) FROM source";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT doubles FROM target");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{5.67, 7.87, new Double(9), new Double(5)};
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertTrue(rs.next());
+
+        doubles = new Double[]{56.7, 7.87, new Double(9.2), new Double(5)};
+        array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithUpsertSelect2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+        conn.createStatement().execute(ddl);
+
+        ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area',ARRAY_APPEND(ARRAY['abcd','b'],'c'))";
+        conn.createStatement().execute(dml);
+
+        dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area2',ARRAY_APPEND(ARRAY['d','fgh'],'something'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        dml = "UPSERT INTO target(region_name, varchars) SELECT region_name, ARRAY_APPEND(varchars,'stu') FROM source";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT varchars FROM target");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"abcd", "b", "c", "stu"};
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertTrue(rs.next());
+
+        strings = new String[]{"d", "fgh", "something", "stu"};
+        array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[2345,46345,23234,456,123]=ARRAY_APPEND(integers,123)");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE varchars[1]=ANY(ARRAY_APPEND(ARRAY['2345','46345','23234'],'1234'))");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['2345','46345','23234','1234']=ARRAY_APPEND(ARRAY['2345','46345','23234'],'1234')");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere4() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[23.45,4634.5,2.3234,123.4]=ARRAY_APPEND(ARRAY[23.45,4634.5,2.3234],123.4)");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere5() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['2345','46345','23234','foo']=ARRAY_APPEND(varchars,'foo')");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere6() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE chars2=ARRAY_APPEND(chars,'foo')");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionInWhere7() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[2,3,4]=ARRAY_APPEND(ARRAY[2,3],4)");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionIntegerWithNull() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(NULL,NULL) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2345, 46345, 23234, 456};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(null, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionVarcharWithNull() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(varchars,NULL) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"2345", "46345", "23234"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionDoublesWithNull() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(doubles,NULL) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionCharsWithNull() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(chars,NULL) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"a", "bbbb", "c", "ddd", "e"};
+
+        Array array = conn.createArrayOf("CHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionWithNull() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(integers,nullcheck) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2345, 46345, 23234, 456};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test(expected = SQLException.class)
+    public void testArrayAppendFunctionCharLimitCheck() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(chars,char1) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"a", "bbbb", "c", "ddd", "e", "wert"};
+
+        Array array = conn.createArrayOf("CHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionIntegerDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "INTEGER", "23");
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(integers,pk) FROM regions");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2345, 46345, 23234, 456, 23};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+
+    }
+
+    @Test
+    public void testArrayAppendFunctionVarcharDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "VARCHAR", "'e'");
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(varchars,pk) FROM regions");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"2345", "46345", "23234", "e"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionBigIntDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "BIGINT", "1112");
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(bigints,pk) FROM regions");
+        assertTrue(rs.next());
+
+        Long[] longs = new Long[]{12l, 34l, 56l, 78l, 910l, 1112l};
+
+        Array array = conn.createArrayOf("BIGINT", longs);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayAppendFunctionBooleanDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "BOOLEAN", "false");
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(bools,pk) FROM regions");
+        assertTrue(rs.next());
+
+        Boolean[] booleans = new Boolean[]{true, false, false};
+
+        Array array = conn.createArrayOf("BOOLEAN", booleans);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ef17181/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
new file mode 100644
index 0000000..db92d61
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.exception.DataExceedsCapacityException;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.types.*;
+import org.apache.phoenix.schema.tuple.Tuple;
+
+@FunctionParseNode.BuiltInFunction(name = ArrayAppendFunction.NAME, args = {
+        @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class,
+                PVarbinaryArray.class}),
+        @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}, defaultValue = "null")})
+public class ArrayAppendFunction extends ScalarFunction {
+
+    public static final String NAME = "ARRAY_APPEND";
+
+    public ArrayAppendFunction() {
+    }
+
+    public ArrayAppendFunction(List<Expression> children) throws TypeMismatchException {
+        super(children);
+
+        if (getDataType() != null && !(getElementExpr() instanceof LiteralExpression && getElementExpr().isNullable()) && !getElementDataType().isCoercibleTo(getBaseType())) {
+            throw TypeMismatchException.newException(getBaseType(), getElementDataType());
+        }
+
+        // If the base type of an element is fixed width, make sure the element being appended will fit
+        if (getDataType() != null && getElementExpr().getDataType().getByteSize() == null && getElementDataType() != null && getBaseType().isFixedWidth() && getElementDataType().isFixedWidth() && getArrayExpr().getMaxLength() != null &&
+                getElementExpr().getMaxLength() != null && getElementExpr().getMaxLength() > getArrayExpr().getMaxLength()) {
+            throw new DataExceedsCapacityException("");
+        }
+        // If the base type has a scale, make sure the element being appended has a scale less than or equal to it
+        if (getDataType() != null && getArrayExpr().getScale() != null && getElementExpr().getScale() != null &&
+                getElementExpr().getScale() > getArrayExpr().getScale()) {
+            throw new DataExceedsCapacityException(getBaseType(), getArrayExpr().getMaxLength(), getArrayExpr().getScale());
+        }
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+
+        if (!getArrayExpr().evaluate(tuple, ptr)) {
+            return false;
+        } else if (ptr.getLength() == 0) {
+            return true;
+        }
+        int arrayLength = PArrayDataType.getArrayLength(ptr, getBaseType(), getArrayExpr().getMaxLength());
+
+        int length = ptr.getLength();
+        int offset = ptr.getOffset();
+        byte[] arrayBytes = ptr.get();
+
+        if (!getElementExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
+            ptr.set(arrayBytes, offset, length);
+            return true;
+        }
+
+        if (!getBaseType().isSizeCompatible(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getArrayExpr().getMaxLength(), getArrayExpr().getScale())) {
+            throw new DataExceedsCapacityException("");
+        }
+
+        getBaseType().coerceBytes(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getElementExpr().getSortOrder(), getArrayExpr().getMaxLength(), getArrayExpr().getScale(), getArrayExpr().getSortOrder());
+
+        return PArrayDataType.appendItemToArray(ptr, length, offset, arrayBytes, getBaseType(), arrayLength, getMaxLength(), getArrayExpr().getSortOrder());
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return children.get(0).getDataType();
+    }
+
+    @Override
+    public Integer getMaxLength() {
+        return this.children.get(0).getMaxLength();
+    }
+
+    @Override
+    public SortOrder getSortOrder() {
+        return getChildren().get(0).getSortOrder();
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    public Expression getArrayExpr() {
+        return getChildren().get(0);
+    }
+
+    public Expression getElementExpr() {
+        return getChildren().get(1);
+    }
+
+    public PDataType getBaseType() {
+        return PDataType.arrayBaseType(getArrayExpr().getDataType());
+    }
+
+    public PDataType getElementDataType() {
+        return getElementExpr().getDataType();
+    }
+
+
+}
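
One subtlety in evaluate() above: the same ImmutableBytesWritable is handed to both children, so the array's bytes, offset, and length are captured before getElementExpr() repoints ptr, and restored if the element evaluates to nothing. A minimal standalone sketch of that save/restore pattern, with plain byte arrays standing in for the expression tree:

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

    public class PtrReuseSketch {
        public static void main(String[] args) {
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();

            // First child writes its result into ptr...
            ptr.set(new byte[] {1, 2, 3});

            // ...so stash the view before the buffer is reused.
            byte[] arrayBytes = ptr.get();
            int offset = ptr.getOffset();
            int length = ptr.getLength();

            // Second child evaluates into the same ptr.
            ptr.set(new byte[0]);

            // Element was empty: restore the first result, mirroring the
            // early return in ArrayAppendFunction.evaluate().
            if (ptr.getLength() == 0) {
                ptr.set(arrayBytes, offset, length);
            }
            System.out.println(ptr.getLength()); // 3
        }
    }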

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ef17181/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java
new file mode 100644
index 0000000..2b4cb84
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java
@@ -0,0 +1,345 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.Calendar;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ArrayAppendFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.*;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class ArrayAppendFunctionTest {
+
+    private static void testExpression(LiteralExpression array, LiteralExpression element, PhoenixArray expected)
+            throws SQLException {
+        List<Expression> expressions = Lists.newArrayList((Expression) array);
+        expressions.add(element);
+
+        Expression arrayAppendFunction = new ArrayAppendFunction(expressions);
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        arrayAppendFunction.evaluate(null, ptr);
+        PhoenixArray result = (PhoenixArray) arrayAppendFunction.getDataType().toObject(ptr, expressions.get(0).getSortOrder(), array.getMaxLength(), array.getScale());
+        assertTrue(result.equals(expected));
+    }
+
+    private static void test(PhoenixArray array, Object element, PDataType arrayDataType, Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) throws SQLException {
+        LiteralExpression arrayLiteral, elementLiteral;
+        arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, arraySortOrder, Determinism.ALWAYS);
+        elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, elementSortOrder, Determinism.ALWAYS);
+        testExpression(arrayLiteral, elementLiteral, expected);
+    }
+
+    @Test
+    public void testArrayAppendFunction1() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{1, 2, -3, 4, 5};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction2() throws Exception {
+        Object[] o = new Object[]{"1", "2", "3", "4"};
+        Object[] o2 = new Object[]{"1", "2", "3", "4", "56"};
+        Object element = "56";
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction3() throws Exception {
+        //offset array short to int transition
+        Object[] o = new Object[Short.MAX_VALUE + 1];
+        for (int i = 0; i < o.length; i++) {
+            o[i] = "a";
+        }
+        Object[] o2 = new Object[Short.MAX_VALUE + 2];
+        for (int i = 0; i < o2.length - 1; i++) {
+            o2[i] = "a";
+        }
+        Object element = "b";
+        o2[o2.length - 1] = element;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction4() throws Exception {
+        //offset array int
+        Object[] o = new Object[Short.MAX_VALUE + 7];
+        for (int i = 0; i < o.length; i++) {
+            o[i] = "a";
+        }
+        Object[] o2 = new Object[Short.MAX_VALUE + 8];
+        for (int i = 0; i < o2.length - 1; i++) {
+            o2[i] = "a";
+        }
+        Object element = "b";
+        o2[o2.length - 1] = element;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction5() throws Exception {
+        Boolean[] o = new Boolean[]{true, false, false, true};
+        Boolean[] o2 = new Boolean[]{true, false, false, true, false};
+        Boolean element = false;
+        PDataType baseType = PBoolean.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction6() throws Exception {
+        Object[] o = new Object[]{new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3)};
+        Object[] o2 = new Object[]{new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3), new Float(8.9)};
+        Object element = 8.9;
+        PDataType baseType = PFloat.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction7() throws Exception {
+        Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE};
+        Object[] o2 = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE, 12.67};
+        Object element = 12.67;
+        PDataType baseType = PDouble.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction8() throws Exception {
+        Object[] o = new Object[]{123l, 677l, 98789l, -78989l, 66787l};
+        Object[] o2 = new Object[]{123l, 677l, 98789l, -78989l, 66787l, 543l};
+        Object element = 543l;
+        PDataType baseType = PLong.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction9() throws Exception {
+        Object[] o = new Object[]{(short) 34, (short) -23, (short) -89, (short) 999, (short) 34};
+        Object[] o2 = new Object[]{(short) 34, (short) -23, (short) -89, (short) 999, (short) 34, (short) 7};
+        Object element = (short) 7;
+        PDataType baseType = PSmallint.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction10() throws Exception {
+        Object[] o = new Object[]{(byte) 4, (byte) 8, (byte) 9};
+        Object[] o2 = new Object[]{(byte) 4, (byte) 8, (byte) 9, (byte) 6};
+        Object element = (byte) 6;
+        PDataType baseType = PTinyint.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction11() throws Exception {
+        Object[] o = new Object[]{BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785)};
+        Object[] o2 = new Object[]{BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785), BigDecimal.valueOf(-19)};
+        Object element = BigDecimal.valueOf(-19);
+        PDataType baseType = PDecimal.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction12() throws Exception {
+        Calendar calendar = Calendar.getInstance();
+        java.util.Date currentDate = calendar.getTime();
+        java.sql.Date date = new java.sql.Date(currentDate.getTime());
+
+        Object[] o = new Object[]{date, date, date};
+        Object[] o2 = new Object[]{date, date, date, date};
+        PDataType baseType = PDate.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction13() throws Exception {
+        Calendar calendar = Calendar.getInstance();
+        java.util.Date currentDate = calendar.getTime();
+        java.sql.Time time = new java.sql.Time(currentDate.getTime());
+
+        Object[] o = new Object[]{time, time, time};
+        Object[] o2 = new Object[]{time, time, time, time};
+        PDataType baseType = PTime.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction14() throws Exception {
+        Calendar calendar = Calendar.getInstance();
+        java.util.Date currentDate = calendar.getTime();
+        java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime());
+
+        Object[] o = new Object[]{timestamp, timestamp, timestamp};
+        Object[] o2 = new Object[]{timestamp, timestamp, timestamp, timestamp};
+        PDataType baseType = PTimestamp.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction15() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{1, 2, -3, 4, 5};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction16() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{1, 2, -3, 4, 5};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayAppendFunction17() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{1, 2, -3, 4, 5};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayAppendFunction18() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"1", "2", "3", "4", "5"};
+        Object element = "5";
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayAppendFunction19() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"1", "2", "3", "4", "5"};
+        Object element = "5";
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayAppendFunction20() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"1", "2", "3", "4", "5"};
+        Object element = "5";
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayAppendFunction21() throws Exception {
+        Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE};
+        Object[] o2 = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE, 12.67};
+        Object element = 12.67;
+        PDataType baseType = PDouble.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayAppendFunction22() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"1", "2", "3", "4"};
+        Object element = null;
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+}


[49/50] [abbrv] phoenix git commit: PHOENIX-1815 - Spark Datasource api

Posted by ma...@apache.org.
PHOENIX-1815 - Spark Datasource api


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3fb3bb4d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3fb3bb4d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3fb3bb4d

Branch: refs/heads/calcite
Commit: 3fb3bb4d2231972dc06326251b76cc1431da7386
Parents: e1bbb94
Author: ravimagham <ra...@apache.org>
Authored: Wed Apr 15 19:03:33 2015 -0700
Committer: ravimagham <ra...@apache.org>
Committed: Wed Apr 15 19:03:33 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                        |   6 +-
 .../src/build/components/all-common-jars.xml    |  11 ++
 phoenix-spark/README.md                         |  74 ++++++++--
 phoenix-spark/pom.xml                           |   6 -
 phoenix-spark/src/it/resources/setup.sql        |   1 +
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 135 ++++++++++++++++---
 .../phoenix/spark/ConfigurationUtil.scala       |  65 +++++++++
 .../phoenix/spark/DataFrameFunctions.scala      |  51 +++++++
 .../apache/phoenix/spark/DefaultSource.scala    |  41 ++++++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   |  12 +-
 .../phoenix/spark/PhoenixRecordWritable.scala   |   2 +-
 .../apache/phoenix/spark/PhoenixRelation.scala  |  80 +++++++++++
 .../phoenix/spark/ProductRDDFunctions.scala     |  21 +--
 .../org/apache/phoenix/spark/package.scala      |   6 +-
 pom.xml                                         |   5 +
 15 files changed, 453 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 51f767f..b3a992e 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -142,9 +142,13 @@
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-flume</artifactId>
     </dependency>
-        <dependency>
+    <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-pig</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-spark</artifactId>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-assembly/src/build/components/all-common-jars.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/components/all-common-jars.xml b/phoenix-assembly/src/build/components/all-common-jars.xml
index ce6da59..769e28f 100644
--- a/phoenix-assembly/src/build/components/all-common-jars.xml
+++ b/phoenix-assembly/src/build/components/all-common-jars.xml
@@ -71,5 +71,16 @@
       </excludes>
       <fileMode>0644</fileMode>
     </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/../phoenix-spark/target/</directory>
+      <outputDirectory>lib</outputDirectory>
+      <includes>
+          <include>phoenix-*.jar</include>
+      </includes>
+      <excludes>
+          <exclude></exclude>
+      </excludes>
+      <fileMode>0644</fileMode>
+    </fileSet>
   </fileSets>
 </component>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/README.md
----------------------------------------------------------------------
diff --git a/phoenix-spark/README.md b/phoenix-spark/README.md
index 1c030f8..1e53c98 100644
--- a/phoenix-spark/README.md
+++ b/phoenix-spark/README.md
@@ -11,7 +11,7 @@ UPSERT INTO TABLE1 (ID, COL1) VALUES (1, 'test_row_1');
 UPSERT INTO TABLE1 (ID, COL1) VALUES (2, 'test_row_2');
 ```
 
-### Load as a DataFrame
+### Load as a DataFrame using the Data Source API
 ```scala
 import org.apache.spark.SparkContext
 import org.apache.spark.sql.SQLContext
@@ -20,15 +20,39 @@ import org.apache.phoenix.spark._
 val sc = new SparkContext("local", "phoenix-test")
 val sqlContext = new SQLContext(sc)
 
+val df = sqlContext.load(
+  "org.apache.phoenix.spark", 
+  Map("table" -> "TABLE1", "zkUrl" -> "phoenix-server:2181")
+)
+
+df
+  .filter(df("COL1") === "test_row_1" && df("ID") === 1L)
+  .select(df("ID"))
+  .show
+```
+
+### Load as a DataFrame directly using a Configuration object
+```scala
+import org.apache.hadoop.conf.Configuration
+import org.apache.spark.SparkContext
+import org.apache.spark.sql.SQLContext
+import org.apache.phoenix.spark._
+
+val configuration = new Configuration()
+// Can set Phoenix-specific settings, requires 'hbase.zookeeper.quorum'
+
+val sc = new SparkContext("local", "phoenix-test")
+val sqlContext = new SQLContext(sc)
+
 // Load the columns 'ID' and 'COL1' from TABLE1 as a DataFrame
 val df = sqlContext.phoenixTableAsDataFrame(
-  "TABLE1", Array("ID", "COL1"), zkUrl = Some("phoenix-server:2181")
+  "TABLE1", Array("ID", "COL1"), conf = configuration
 )
 
 df.show
 ```
 
-### Load as an RDD
+### Load as an RDD, using a Zookeeper URL
 ```scala
 import org.apache.spark.SparkContext
 import org.apache.spark.sql.SQLContext
@@ -47,7 +71,10 @@ val firstId = rdd1.first()("ID").asInstanceOf[Long]
 val firstCol = rdd1.first()("COL1").asInstanceOf[String]
 ```
 
-## Saving RDDs to Phoenix
+## Saving RDDs to Phoenix 
+
+`saveToPhoenix` is an implicit method on RDD[Product], or an RDD of Tuples. The data types must
+correspond to the Java types Phoenix supports (http://phoenix.apache.org/language/datatypes.html)
 
 Given a Phoenix table with the following DDL
 
@@ -55,9 +82,6 @@ Given a Phoenix table with the following DDL
 CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER);
 ```
 
-`saveToPhoenix` is an implicit method on RDD[Product], or an RDD of Tuples. The data types must
-correspond to the Java types Phoenix supports (http://phoenix.apache.org/language/datatypes.html)
-
 ```scala
 import org.apache.spark.SparkContext
 import org.apache.phoenix.spark._
@@ -74,6 +98,38 @@ sc
   )
 ```
 
+## Saving DataFrames to Phoenix
+
+The `save` method on DataFrame allows passing in a data source type. You can use
+`org.apache.phoenix.spark`, and must also pass in `table` and `zkUrl` parameters to
+specify which table and server to persist the DataFrame to. The column names are derived from
+the DataFrame's schema field names, and must match the Phoenix column names.
+
+The `save` method also takes a `SaveMode` option, for which only `SaveMode.Overwrite` is supported.
+
+Given two Phoenix tables with the following DDL:
+
+```sql
+CREATE TABLE INPUT_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER);
+CREATE TABLE OUTPUT_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER);
+```
+
+```scala
+import org.apache.spark.SparkContext
+import org.apache.spark.sql.{SaveMode, SQLContext}
+import org.apache.phoenix.spark._
+
+// Load INPUT_TABLE
+val sc = new SparkContext("local", "phoenix-test")
+val sqlContext = new SQLContext(sc)
+val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "INPUT_TABLE",
+  "zkUrl" -> hbaseConnectionString))
+
+// Save to OUTPUT_TABLE
+df.save("org.apache.phoenix.spark", SaveMode.Overwrite, Map("table" -> "OUTPUT_TABLE", 
+  "zkUrl" -> hbaseConnectionString))
+```
+
 ## Notes
 
 The functions `phoenixTableAsDataFrame`, `phoenixTableAsRDD` and `saveToPhoenix` all support
@@ -85,5 +141,7 @@ in the `conf` parameter. Similarly, if no configuration is passed in, `zkUrl` mu
 
 ## Limitations
 
-- No pushdown predicate support from Spark SQL (yet)
+- Basic support for column and predicate pushdown using the Data Source API
+- The Data Source API does not support passing custom Phoenix settings in configuration; you must
+create the DataFrame or RDD directly if you need fine-grained configuration.
 - No support for aggregate or distinct functions (http://phoenix.apache.org/phoenix_mr.html)
\ No newline at end of file

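Tying the last limitation above back to the existing entry points: when custom Phoenix
settings are needed, the Configuration-based API still applies. A minimal sketch,
assuming a reachable cluster (the quorum address is a placeholder):

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HConstants
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.phoenix.spark._

// Carry any Phoenix/HBase client settings in a Configuration object
val conf = new Configuration()
conf.set(HConstants.ZOOKEEPER_QUORUM, "phoenix-server:2181")

val sc = new SparkContext("local", "phoenix-test")
val sqlContext = new SQLContext(sc)

// Fine-grained configuration flows through `conf` rather than data source options
val df = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = conf)
```
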
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 8b06cf7..adeed88 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -97,12 +97,6 @@
     </dependency>
 
     <dependency>
-      <groupId>org.xerial.snappy</groupId>
-      <artifactId>snappy-java</artifactId>
-      <version>1.1.1.6</version>
-    </dependency>
-
-    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
       <version>${hadoop-two.version}</version>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/it/resources/setup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index ce74c58..40157a2 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -15,6 +15,7 @@
 -- limitations under the License.
 
 CREATE TABLE table1 (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
+CREATE TABLE table1_copy (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
 CREATE TABLE table2 (id BIGINT NOT NULL PRIMARY KEY, table1_id BIGINT, "t2col1" VARCHAR)
 UPSERT INTO table1 (id, col1) VALUES (1, 'test_row_1')
 UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (1, 1, 'test_child_1')

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 149baec..db99f65 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -17,14 +17,14 @@ import java.sql.{Connection, DriverManager}
 import java.util.Date
 
 import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hbase.{HConstants, HBaseTestingUtility}
+import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants, HBaseTestingUtility}
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT
 import org.apache.phoenix.query.BaseTest
 import org.apache.phoenix.schema.ColumnNotFoundException
 import org.apache.phoenix.schema.types.PVarchar
 import org.apache.phoenix.util.ColumnInfo
-import org.apache.spark.sql.SQLContext
-import org.apache.spark.sql.types.{StringType, StructField}
+import org.apache.spark.sql.{SaveMode, execution, SQLContext}
+import org.apache.spark.sql.types.{LongType, DataType, StringType, StructField}
 import org.apache.spark.{SparkConf, SparkContext}
 import org.joda.time.DateTime
 import org.scalatest._
@@ -139,7 +139,10 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df2.registerTempTable("sql_table_2")
 
-    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+    val sqlRdd = sqlContext.sql("""
+        |SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1
+        |INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)""".stripMargin
+    )
 
     val count = sqlRdd.count()
 
@@ -149,7 +152,9 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
   test("Can create schema RDD and execute query on case sensitive table (no config)") {
     val sqlContext = new SQLContext(sc)
 
-    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"), zkUrl = Some(quorumAddress))
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+      zkUrl = Some(quorumAddress))
 
     df1.registerTempTable("table3")
 
@@ -163,7 +168,8 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
   test("Can create schema RDD and execute constrained query") {
     val sqlContext = new SQLContext(sc)
 
-    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"),
+      conf = hbaseConfiguration)
 
     df1.registerTempTable("sql_table_1")
 
@@ -173,7 +179,10 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     df2.registerTempTable("sql_table_2")
 
-    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+    val sqlRdd = sqlContext.sql("""
+      |SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1
+      |INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)""".stripMargin
+    )
 
     val count = sqlRdd.count()
 
@@ -194,7 +203,7 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
       // we have to execute an action before the predicate failure can occur
       val count = sqlRdd.count()
-    }.getCause shouldBe a [ColumnNotFoundException]
+    }.getCause shouldBe a[ColumnNotFoundException]
   }
 
   test("Can create schema RDD with predicate that will never match") {
@@ -216,10 +225,15 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
   test("Can create schema RDD with complex predicate") {
     val sqlContext = new SQLContext(sc)
 
-    val df1 = sqlContext.phoenixTableAsDataFrame("DATE_PREDICATE_TEST_TABLE", Array("ID", "TIMESERIES_KEY"),
-      predicate = Some("ID > 0 AND TIMESERIES_KEY BETWEEN CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)"),
+    val df1 = sqlContext.phoenixTableAsDataFrame(
+      "DATE_PREDICATE_TEST_TABLE",
+      Array("ID", "TIMESERIES_KEY"),
+      predicate = Some("""
+        |ID > 0 AND TIMESERIES_KEY BETWEEN
+        |CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND
+        |CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)""".stripMargin),
       conf = hbaseConfiguration)
-    
+
     df1.registerTempTable("date_predicate_test_table")
 
     val sqlRdd = df1.sqlContext.sql("SELECT * FROM date_predicate_test_table")
@@ -248,7 +262,7 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     count shouldEqual 1L
   }
-  
+
   test("Can read a table as an RDD") {
     val rdd1 = sc.phoenixTableAsRDD("ARRAY_TEST_TABLE", Seq("ID", "VCARRAY"),
       conf = hbaseConfiguration)
@@ -271,7 +285,7 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
       .parallelize(dataSet)
       .saveToPhoenix(
         "OUTPUT_TEST_TABLE",
-        Seq("ID","COL1","COL2"),
+        Seq("ID", "COL1", "COL2"),
         hbaseConfiguration
       )
 
@@ -279,7 +293,7 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     val stmt = conn.createStatement()
     val rs = stmt.executeQuery("SELECT ID, COL1, COL2 FROM OUTPUT_TEST_TABLE")
     val results = ListBuffer[(Long, String, Int)]()
-    while(rs.next()) {
+    while (rs.next()) {
       results.append((rs.getLong(1), rs.getString(2), rs.getInt(3)))
     }
 
@@ -306,7 +320,7 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     val stmt = conn.createStatement()
     val rs = stmt.executeQuery("SELECT COL3 FROM OUTPUT_TEST_TABLE WHERE ID = 1 OR ID = 2 ORDER BY ID ASC")
     val results = ListBuffer[java.sql.Date]()
-    while(rs.next()) {
+    while (rs.next()) {
       results.append(rs.getDate(1))
     }
 
@@ -315,12 +329,89 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
     results(1).getTime shouldEqual date.getTime
   }
 
-  test("Not specifying a zkUrl or a config quorum URL should fail") {
-    intercept[UnsupportedOperationException] {
-      val sqlContext = new SQLContext(sc)
-      val badConf = new Configuration(hbaseConfiguration)
-      badConf.unset(HConstants.ZOOKEEPER_QUORUM)
-      sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = badConf)
+  test("Can infer schema without defining columns") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.phoenixTableAsDataFrame("TABLE2", Seq(), conf = hbaseConfiguration)
+    df.schema("ID").dataType shouldEqual LongType
+    df.schema("TABLE1_ID").dataType shouldEqual LongType
+    df.schema("t2col1").dataType shouldEqual StringType
+  }
+
+  test("Spark SQL can use Phoenix as a data source with no schema specified") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TABLE1",
+      "zkUrl" -> quorumAddress))
+    df.count() shouldEqual 2
+    df.schema("ID").dataType shouldEqual LongType
+    df.schema("COL1").dataType shouldEqual StringType
+  }
+
+  test("Spark SQL can use Phoenix as a data source with PrunedFilteredScan") {
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TABLE1",
+      "zkUrl" -> quorumAddress))
+    val res = df.filter(df("COL1") === "test_row_1" && df("ID") === 1L).select(df("ID"))
+
+    // Make sure we got the right value back
+    assert(res.first().getLong(0) == 1L)
+
+    /*
+      NOTE: There doesn't appear to be any way of verifying from the Spark query planner that
+      filtering is being pushed down and done server-side. However, since PhoenixRelation
+      implements PrunedFilteredScan, debugging has shown that both the SELECT columns and WHERE
+      predicates are being passed along to us, which we then forward to Phoenix.
+      TODO: investigate further to find a way to verify server-side pushdown
+     */
+  }
+
+  test("Can persist a dataframe using 'DataFrame.saveToPhoenix'") {
+    // Load from TABLE1
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TABLE1",
+      "zkUrl" -> quorumAddress))
+
+    // Save to TABLE1_COPY
+    df.saveToPhoenix("TABLE1_COPY", zkUrl = Some(quorumAddress))
+
+    // Verify results
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT * FROM TABLE1_COPY")
+
+    val checkResults = List((1L, "test_row_1"), (2L, "test_row_2"))
+    val results = ListBuffer[(Long, String)]()
+    while (rs.next()) {
+      results.append((rs.getLong(1), rs.getString(2)))
     }
+    stmt.close()
+
+    results.toList shouldEqual checkResults
   }
-}
+
+  test("Can persist a dataframe using 'DataFrame.save()") {
+    // Clear TABLE1_COPY
+    var stmt = conn.createStatement()
+    stmt.executeUpdate("DELETE FROM TABLE1_COPY")
+    stmt.close()
+
+    // Load TABLE1, save as TABLE1_COPY
+    val sqlContext = new SQLContext(sc)
+    val df = sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TABLE1",
+      "zkUrl" -> quorumAddress))
+
+    // Save to TABLE1_COPY
+    df.save("org.apache.phoenix.spark", SaveMode.Overwrite, Map("table" -> "TABLE1_COPY", "zkUrl" -> quorumAddress))
+
+    // Verify results
+    stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT * FROM TABLE1_COPY")
+
+    val checkResults = List((1L, "test_row_1"), (2L, "test_row_2"))
+    val results = ListBuffer[(Long, String)]()
+    while (rs.next()) {
+      results.append((rs.getLong(1), rs.getString(2)))
+    }
+    stmt.close()
+
+    results.toList shouldEqual checkResults
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala
new file mode 100644
index 0000000..c0c7248
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala
@@ -0,0 +1,65 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
+import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.phoenix.util.ColumnInfo
+import scala.collection.JavaConversions._
+
+object ConfigurationUtil extends Serializable {
+
+  def getOutputConfiguration(tableName: String, columns: Seq[String], zkUrl: Option[String], conf: Option[Configuration]): Configuration = {
+
+    // Create an HBaseConfiguration object from the passed in config, if present
+    val config = conf match {
+      case Some(c) => HBaseConfiguration.create(c)
+      case _ => HBaseConfiguration.create()
+    }
+
+    // Set the table to save to
+    PhoenixConfigurationUtil.setOutputTableName(config, tableName)
+
+    // Infer column names from the DataFrame schema
+    PhoenixConfigurationUtil.setUpsertColumnNames(config, columns.mkString(","))
+
+    // Override the Zookeeper URL if present. Throw exception if no address given.
+    zkUrl match {
+      case Some(url) => config.set(HConstants.ZOOKEEPER_QUORUM, url )
+      case _ => {
+        if(config.get(HConstants.ZOOKEEPER_QUORUM) == null) {
+          throw new UnsupportedOperationException(
+            s"One of zkUrl or '${HConstants.ZOOKEEPER_QUORUM}' config property must be provided"
+          )
+        }
+      }
+    }
+
+    // Return the configuration object
+    config
+  }
+
+  // Return a serializable representation of the columns
+  def encodeColumns(conf: Configuration): String = {
+    ColumnInfoToStringEncoderDecoder.encode(
+      PhoenixConfigurationUtil.getUpsertColumnMetadataList(conf)
+    )
+  }
+
+  // Decode the columns to a list of ColumnInfo objects
+  def decodeColumns(encodedColumns: String): List[ColumnInfo] = {
+    ColumnInfoToStringEncoderDecoder.decode(encodedColumns).toList
+  }
+}

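For context, a rough sketch of how the two save paths are expected to use this helper
(table name, columns, and quorum are placeholders; encoding the columns needs a reachable
Phoenix cluster, since the metadata list is fetched through PhoenixConfigurationUtil):

```scala
import org.apache.phoenix.spark.ConfigurationUtil

// Build the output configuration once on the driver
val config = ConfigurationUtil.getOutputConfiguration(
  "OUTPUT_TEST_TABLE", Seq("ID", "COL1", "COL2"), Some("phoenix-server:2181"), None)

// The ColumnInfo list is not serializable, so it travels to executors as a String
val encoded = ConfigurationUtil.encodeColumns(config)
val columns = ConfigurationUtil.decodeColumns(encoded) // back to List[ColumnInfo]
```
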
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
new file mode 100644
index 0000000..e17d7a5
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -0,0 +1,51 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.io.NullWritable
+import org.apache.phoenix.mapreduce.PhoenixOutputFormat
+import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.spark.Logging
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.DataFrame
+
+class DataFrameFunctions(data: DataFrame) extends Logging with Serializable {
+
+  def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
+                    zkUrl: Option[String] = None): Unit = {
+
+    val config = ConfigurationUtil.getOutputConfiguration(tableName, data.schema.fieldNames, zkUrl, Some(conf))
+
+    // Encode the column info to a serializable type
+    val encodedColumns = ConfigurationUtil.encodeColumns(config)
+
+    // Map the row object into a PhoenixRecordWritable
+    val phxRDD: RDD[(NullWritable, PhoenixRecordWritable)] = data.map { row =>
+      val rec = new PhoenixRecordWritable(encodedColumns)
+      row.toSeq.foreach { e => rec.add(e) }
+      (null, rec)
+    }
+
+    // Save it
+    phxRDD.saveAsNewAPIHadoopFile(
+      "",
+      classOf[NullWritable],
+      classOf[PhoenixRecordWritable],
+      classOf[PhoenixOutputFormat[PhoenixRecordWritable]],
+      config
+    )
+  }
+}
\ No newline at end of file

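Together with the implicit conversion added to package.scala in this commit, this gives
DataFrame a saveToPhoenix symmetrical to the RDD one. A sketch, assuming a DataFrame `df`
whose schema field names match the Phoenix columns (the zkUrl value is a placeholder):

```scala
import org.apache.phoenix.spark._

// Alternatively, pass a Hadoop Configuration via the `conf` parameter
df.saveToPhoenix("OUTPUT_TEST_TABLE", zkUrl = Some("phoenix-server:2181"))
```
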
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
new file mode 100644
index 0000000..b0e9754
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala
@@ -0,0 +1,41 @@
+package org.apache.phoenix.spark
+
+import org.apache.spark.sql.{SaveMode, DataFrame, SQLContext}
+import org.apache.spark.sql.sources.{CreatableRelationProvider, BaseRelation, RelationProvider}
+import org.apache.phoenix.spark._
+
+class DefaultSource extends RelationProvider with CreatableRelationProvider {
+
+  // Override 'RelationProvider.createRelation', this enables DataFrame.load()
+  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = {
+    verifyParameters(parameters)
+
+    new PhoenixRelation(
+      parameters("table"),
+      parameters("zkUrl")
+    )(sqlContext)
+  }
+
+  // Override 'CreatableRelationProvider.createRelation', this enables DataFrame.save()
+  override def createRelation(sqlContext: SQLContext, mode: SaveMode,
+                              parameters: Map[String, String], data: DataFrame): BaseRelation = {
+
+    if (!mode.equals(SaveMode.Overwrite)) {
+      throw new Exception("SaveMode other than SaveMode.OverWrite is not supported")
+    }
+
+    verifyParameters(parameters)
+
+    // Save the DataFrame to Phoenix
+    data.saveToPhoenix(parameters("table"), zkUrl = parameters.get("zkUrl"))
+
+    // Return a relation of the saved data
+    createRelation(sqlContext, parameters)
+  }
+
+  // Ensure the required parameters are present
+  def verifyParameters(parameters: Map[String, String]): Unit = {
+    if (parameters.get("table").isEmpty) throw new RuntimeException("No Phoenix 'table' option defined")
+    if (parameters.get("zkUrl").isEmpty) throw new RuntimeException("No Phoenix 'zkUrl' option defined")
+  }
+}

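Spark's data source resolution appends `.DefaultSource` to the package name given to
load()/save(), which is why the README examples reference `org.apache.phoenix.spark`.
A sketch of the two entry points this class enables, assuming a SQLContext `sqlContext`
and a DataFrame `df` in scope (quorum address is a placeholder):

```scala
import org.apache.spark.sql.SaveMode

// RelationProvider.createRelation, via DataFrame.load()
val loaded = sqlContext.load("org.apache.phoenix.spark",
  Map("table" -> "TABLE1", "zkUrl" -> "phoenix-server:2181"))

// CreatableRelationProvider.createRelation, via DataFrame.save();
// only SaveMode.Overwrite is accepted
df.save("org.apache.phoenix.spark", SaveMode.Overwrite,
  Map("table" -> "TABLE1_COPY", "zkUrl" -> "phoenix-server:2181"))
```
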
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index b27f9f9..9a359e3 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -14,7 +14,7 @@
 package org.apache.phoenix.spark
 
 import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.hadoop.io.NullWritable
 import org.apache.phoenix.mapreduce.PhoenixInputFormat
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
@@ -65,12 +65,12 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
   def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
     val query = "SELECT %s FROM \"%s\"".format(
-      columns.map(f => "\"" + f + "\"").mkString(", "),
+      if (columns.isEmpty) "*" else columns.map(f => "\"" + f + "\"").mkString(", "),
       table
     )
 
     query + (predicate match {
-      case Some(p: String) => " WHERE " + p
+      case Some(p: String) if p.length > 0 => " WHERE " + p
       case _ => ""
     })
   }
@@ -79,10 +79,12 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 
     // This is just simply not serializable, so don't try, but clone it because
     // PhoenixConfigurationUtil mutates it.
-    val config = new Configuration(conf)
+    val config = HBaseConfiguration.create(conf)
 
     PhoenixConfigurationUtil.setInputQuery(config, buildSql(table, columns, predicate))
-    PhoenixConfigurationUtil.setSelectColumnNames(config, columns.mkString(","))
+    if(!columns.isEmpty) {
+      PhoenixConfigurationUtil.setSelectColumnNames(config, columns.mkString(","))
+    }
     PhoenixConfigurationUtil.setInputTableName(config, "\"" + table + "\"")
     PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
 

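The effect of the buildSql() change above, sketched as input/output pairs (`rdd` stands
for any PhoenixRDD instance):

```scala
rdd.buildSql("TABLE1", Seq(), None)
// => SELECT * FROM "TABLE1"                        (empty columns now mean "*")
rdd.buildSql("TABLE1", Seq("ID", "COL1"), Some("ID > 1"))
// => SELECT "ID", "COL1" FROM "TABLE1" WHERE ID > 1
rdd.buildSql("TABLE1", Seq("ID"), Some(""))
// => SELECT "ID" FROM "TABLE1"                     (empty predicate now ignored)
```
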
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
index 48a70ec..67e0bd2 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
@@ -31,7 +31,7 @@ class PhoenixRecordWritable(var encodedColumns: String) extends DBWritable {
 
   override def write(statement: PreparedStatement): Unit = {
     // Decode the ColumnInfo list
-    val columns = ColumnInfoToStringEncoderDecoder.decode(encodedColumns).toList
+    val columns = ConfigurationUtil.decodeColumns(encodedColumns)
 
     // Make sure we at least line up in size
     if(upsertValues.length != columns.length) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
new file mode 100644
index 0000000..4177022
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
@@ -0,0 +1,80 @@
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.{Row, SQLContext}
+import org.apache.spark.sql.sources._
+import org.apache.commons.lang.StringEscapeUtils.escapeSql
+
+case class PhoenixRelation(tableName: String, zkUrl: String)(@transient val sqlContext: SQLContext)
+    extends BaseRelation with PrunedFilteredScan {
+
+  /*
+    This is the buildScan() implementing Spark's PrunedFilteredScan.
+    Spark SQL queries with columns or predicates specified will be pushed down
+    to us here, and we can pass that on to Phoenix. According to the docs, this
+    is an optimization, and the filtering/pruning will be re-evaluated again,
+    but this prevents having to load the whole table into Spark first.
+  */
+  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
+    new PhoenixRDD(
+      sqlContext.sparkContext,
+      tableName,
+      requiredColumns,
+      Some(buildFilter(filters)),
+      Some(zkUrl),
+      new Configuration()
+    ).toDataFrame(sqlContext).rdd
+  }
+
+  // Required by BaseRelation, this will return the full schema for a table
+  override def schema: StructType = {
+    new PhoenixRDD(
+      sqlContext.sparkContext,
+      tableName,
+      Seq(),
+      None,
+      Some(zkUrl),
+      new Configuration()
+    ).toDataFrame(sqlContext).schema
+  }
+
+  // Attempt to create Phoenix-accepted WHERE clauses from Spark filters,
+  // mostly inspired from Spark SQL JDBCRDD and the couchbase-spark-connector
+  private def buildFilter(filters: Array[Filter]): String = {
+    if (filters.isEmpty) {
+      return ""
+    }
+
+    val filter = new StringBuilder("")
+    var i = 0
+
+    filters.foreach(f => {
+      if (i > 0) {
+        filter.append(" AND")
+      }
+
+      f match {
+        case EqualTo(attr, value) => filter.append(s" $attr = ${compileValue(value)}")
+        case GreaterThan(attr, value) => filter.append(s" $attr > ${compileValue(value)}")
+        case GreaterThanOrEqual(attr, value) => filter.append(s" $attr >= ${compileValue(value)}")
+        case LessThan(attr, value) => filter.append(s" $attr < ${compileValue(value)}")
+        case LessThanOrEqual(attr, value) => filter.append(s" $attr <= ${compileValue(value)}")
+        case IsNull(attr) => filter.append(s" $attr IS NULL")
+        case IsNotNull(attr) => filter.append(s" $attr IS NOT NULL")
+        case _ => throw new Exception("Unsupported filter")
+      }
+
+      i = i + 1
+    })
+
+    filter.toString()
+  }
+
+  // Helper function to escape string values in SQL queries
+  private def compileValue(value: Any): Any = value match {
+    case stringValue: String => s"'${escapeSql(stringValue)}'"
+    case _ => value
+  }
+}

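To make the filter translation concrete, a worked sketch (column names and values are
illustrative, and buildFilter is private, so this only traces the code above):

```scala
// A Spark SQL query such as
//   df.filter(df("COL1") === "test_row_1" && df("ID") > 1L).select(df("ID"))
// reaches buildScan with requiredColumns = Array("ID") and
//   filters = Array(EqualTo("COL1", "test_row_1"), GreaterThan("ID", 1))
// which buildFilter renders as the Phoenix WHERE clause:
//   COL1 = 'test_row_1' AND ID > 1
// String values pass through escapeSql, so embedded quotes are doubled.
```
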
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
index 2926569..3d24fb9 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
@@ -27,27 +27,10 @@ class ProductRDDFunctions[A <: Product](data: RDD[A]) extends Logging with Seria
                     conf: Configuration = new Configuration, zkUrl: Option[String] = None)
                     : Unit = {
 
-    // Setup Phoenix output configuration, make a local copy
-    val config = new Configuration(conf)
-    PhoenixConfigurationUtil.setOutputTableName(config, tableName)
-    PhoenixConfigurationUtil.setUpsertColumnNames(config, cols.mkString(","))
-
-    // Override the Zookeeper URL if present. Throw exception if no address given.
-    zkUrl match {
-      case Some(url) => config.set(HConstants.ZOOKEEPER_QUORUM, url )
-      case _ => {
-        if(config.get(HConstants.ZOOKEEPER_QUORUM) == null) {
-          throw new UnsupportedOperationException(
-            s"One of zkUrl or '${HConstants.ZOOKEEPER_QUORUM}' config property must be provided"
-          )
-        }
-      }
-    }
+    val config = ConfigurationUtil.getOutputConfiguration(tableName, cols, zkUrl, Some(conf))
 
     // Encode the column info to a serializable type
-    val encodedColumns = ColumnInfoToStringEncoderDecoder.encode(
-      PhoenixConfigurationUtil.getUpsertColumnMetadataList(config)
-    )
+    val encodedColumns = ConfigurationUtil.encodeColumns(config)
 
     // Map each element of the product to a new (NullWritable, PhoenixRecordWritable)
     val phxRDD: RDD[(NullWritable, PhoenixRecordWritable)] = data.map { e =>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
index c19ec16..3fed79e 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
@@ -15,7 +15,7 @@ package org.apache.phoenix
 
 import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.{DataFrame, SQLContext}
 
 package object spark {
   implicit def toProductRDDFunctions[A <: Product](rdd: RDD[A]): ProductRDDFunctions[A] = {
@@ -29,4 +29,8 @@ package object spark {
   implicit def toSparkSqlContextFunctions(sqlContext: SQLContext): SparkSqlContextFunctions = {
     new SparkSqlContextFunctions(sqlContext)
   }
+
+  implicit def toDataFrameFunctions(data: DataFrame): DataFrameFunctions = {
+    new DataFrameFunctions(data)
+  }
 }

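With this addition the package object exposes four implicit entry points. A sketch
(`sc`, `sqlContext`, `conf`, `rdd`, and `df` are placeholders for values in scope):

```scala
import org.apache.phoenix.spark._

sc.phoenixTableAsRDD("TABLE1", Seq("ID", "COL1"), conf = conf)                 // SparkContext
sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = conf) // SQLContext
rdd.saveToPhoenix("OUTPUT_TEST_TABLE", Seq("ID", "COL1", "COL2"), conf)        // RDD[Product]
df.saveToPhoenix("TABLE1_COPY", zkUrl = Some("phoenix-server:2181"))           // DataFrame (new)
```
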
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3fb3bb4d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index b81dfb5..977218d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -428,6 +428,11 @@
         <artifactId>phoenix-pig</artifactId>
         <version>${project.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.phoenix</groupId>
+        <artifactId>phoenix-spark</artifactId>
+        <version>${project.version}</version>
+      </dependency>
 
       <!-- HBase dependencies -->
       <dependency>


[44/50] [abbrv] phoenix git commit: PHOENIX-1861 Padding character should be inverted if sort order is descending (Dumindu Buddhika)

Posted by ma...@apache.org.
PHOENIX-1861 Padding character should be inverted if sort order is descending (Dumindu Buddhika)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b0c28a2d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b0c28a2d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b0c28a2d

Branch: refs/heads/calcite
Commit: b0c28a2de9b8b2807abaeddf1ec5430cc9f13c61
Parents: 3f6b259
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Apr 14 12:26:09 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Apr 14 12:26:09 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/compile/WhereOptimizer.java   | 2 +-
 .../src/main/java/org/apache/phoenix/schema/PTableImpl.java        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0c28a2d/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index e25cfbc..a5aef02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -1210,7 +1210,7 @@ public class WhereOptimizer {
                     Integer length = getColumn().getMaxLength();
                     if (length != null) {
                         // Go through type to pad as the fill character depends on the type.
-                        type.pad(ptr, length, SortOrder.getDefault());
+                        type.pad(ptr, length, getColumn().getSortOrder());
                     }
                 }
                 byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0c28a2d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 702edbd..088595b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -703,7 +703,7 @@ public class PTableImpl implements PTable {
                 Integer	maxLength = column.getMaxLength();
             	if (!isNull && type.isFixedWidth() && maxLength != null) {
     				if (ptr.getLength() <= maxLength) {
-                        type.pad(ptr, maxLength, SortOrder.getDefault());
+                        type.pad(ptr, maxLength, column.getSortOrder());
                     } else if (ptr.getLength() > maxLength) {
                         throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " bytes (" + type.toObject(byteValue) + ")");
                     }

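Why the sort order matters here, as a sketch (byte values are illustrative): Phoenix
stores DESC columns with every byte bit-inverted, so the fixed-width CHAR pad byte has
to be inverted as well, which is what passing the column's actual SortOrder achieves.

```scala
// ASC: a CHAR(4) key value "ab" is padded with the space byte 0x20
val asc = Array[Byte](0x61, 0x62, 0x20, 0x20) // "ab  "

// DESC: stored bytes are bit-inverted, so the pad byte must be ~0x20 = 0xDF;
// padding a DESC key with a plain 0x20 would compare differently than the
// row key bytes actually written, breaking fixed-width key matching.
val desc = asc.map(b => (~b).toByte)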

[05/50] [abbrv] phoenix git commit: PHOENIX-1457 Use high priority queue for metadata endpoint calls

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
deleted file mode 100644
index 7d08c0d..0000000
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.junit.Test;
-
-public class PhoenixIndexRpcSchedulerFactoryTest {
-
-    @Test
-    public void ensureInstantiation() throws Exception {
-        Configuration conf = new Configuration(false);
-        conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-            PhoenixIndexRpcSchedulerFactory.class, RpcSchedulerFactory.class);
-        // kinda lame that we copy the copy from the regionserver to do this and can't use a static
-        // method, but meh
-        try {
-            Class<?> rpcSchedulerFactoryClass =
-                    conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-                        SimpleRpcSchedulerFactory.class);
-            Object o = rpcSchedulerFactoryClass.newInstance();
-            assertTrue(o instanceof PhoenixIndexRpcSchedulerFactory);
-        } catch (InstantiationException e) {
-            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
-                false);
-        } catch (IllegalAccessException e) {
-            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
-                false);
-        }
-    }
-
-    /**
-     * Ensure that we can't configure the index priority ranges inside the hbase ranges
-     * @throws Exception
-     */
-    @Test
-    public void testValidateIndexPriorityRanges() throws Exception {
-        Configuration conf = new Configuration(false);
-        // standard configs should be fine
-        PhoenixIndexRpcSchedulerFactory factory = new PhoenixIndexRpcSchedulerFactory();
-        factory.create(conf, null);
-
-        setMinMax(conf, 0, 4);
-        factory.create(conf, null);
-
-        setMinMax(conf, 201, 202);
-        factory.create(conf, null);
-
-        setMinMax(conf, 102, 101);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed max less than min");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        setMinMax(conf, 5, 6);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed min in range");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        setMinMax(conf, 6, 60);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed min/max in hbase range");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        setMinMax(conf, 6, 101);
-        try {
-            factory.create(conf, null);
-            fail("Should not have allowed in range");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-    }
-
-    private void setMinMax(Configuration conf, int min, int max) {
-        conf.setInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, min);
-        conf.setInt(QueryServices.MAX_INDEX_PRIOIRTY_ATTRIB, max);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java
new file mode 100644
index 0000000..eb28c8d
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.junit.Test;
+
+public class PhoenixRpcSchedulerFactoryTest {
+
+    @Test
+    public void ensureInstantiation() throws Exception {
+        Configuration conf = new Configuration(false);
+        conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+            PhoenixRpcSchedulerFactory.class, RpcSchedulerFactory.class);
+        // kinda lame that we copy the copy from the regionserver to do this and can't use a static
+        // method, but meh
+        try {
+            Class<?> rpcSchedulerFactoryClass =
+                    conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                        SimpleRpcSchedulerFactory.class);
+            Object o = rpcSchedulerFactoryClass.newInstance();
+            assertTrue(o instanceof PhoenixRpcSchedulerFactory);
+        } catch (InstantiationException e) {
+            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
+                false);
+        } catch (IllegalAccessException e) {
+            assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e,
+                false);
+        }
+    }
+
+    /**
+     * Ensure that we can't configure the index and metadata priority ranges inside the hbase ranges
+     * @throws Exception
+     */
+    @Test
+    public void testValidateRpcPriorityRanges() throws Exception {
+        Configuration conf = new Configuration(false);
+        // standard configs should be fine
+        PhoenixRpcSchedulerFactory factory = new PhoenixRpcSchedulerFactory();
+        factory.create(conf, null);
+
+        // test priorities less than HBase range
+        setPriorities(conf, -4, -1);
+        factory.create(conf, null);
+
+        // test priorities greater than HBase range
+        setPriorities(conf, 1001, 1002);
+        factory.create(conf, null);
+
+        // test priorities in HBase range
+        setPriorities(conf, 1, 201);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        setPriorities(conf, 1001, 1);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        
+        // test metadata priority in HBase range
+        setPriorities(conf, 1001, HConstants.NORMAL_QOS);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        setPriorities(conf, HConstants.NORMAL_QOS, 1001);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        
+        // test index priority in HBase range
+        setPriorities(conf, 1001, HConstants.HIGH_QOS);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+        setPriorities(conf, HConstants.HIGH_QOS, 1001);
+        try {
+            factory.create(conf, null);
+            fail("Should not have allowed priorities in HBase range");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+    }
+
+    private void setPriorities(Configuration conf, int indexPriority, int metadataPriority) {
+        conf.setInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, indexPriority);
+        conf.setInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, metadataPriority);
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index e4ec56a..748ad19 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -457,7 +457,7 @@ public abstract class BaseTest {
     }
     
     private static final String ORG_ID = "00D300000000XHP";
-    private static final int NUM_SLAVES_BASE = 1;
+    protected static int NUM_SLAVES_BASE = 1;
     
     protected static String getZKClientPort(Configuration conf) {
         return conf.get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
@@ -531,9 +531,13 @@ public abstract class BaseTest {
     }
             
     protected static void setUpTestDriver(ReadOnlyProps props) throws Exception {
-        String url = checkClusterInitialized(props);
+        setUpTestDriver(props, props);
+    }
+    
+    protected static void setUpTestDriver(ReadOnlyProps serverProps, ReadOnlyProps clientProps) throws Exception {
+        String url = checkClusterInitialized(serverProps);
         if (driver == null) {
-            driver = initAndRegisterDriver(url, props);
+            driver = initAndRegisterDriver(url, clientProps);
         }
     }
 
@@ -557,7 +561,7 @@ public abstract class BaseTest {
         setUpConfigForMiniCluster(conf, overrideProps);
         utility = new HBaseTestingUtility(conf);
         try {
-            utility.startMiniCluster();
+            utility.startMiniCluster(NUM_SLAVES_BASE);
             // add shutdown hook to kill the mini cluster
             Runtime.getRuntime().addShutdownHook(new Thread() {
                 @Override


[26/50] [abbrv] phoenix git commit: PHOENIX-1794 Support Long.MIN_VALUE for phoenix BIGINT type

Posted by ma...@apache.org.
PHOENIX-1794 Support Long.MIN_VALUE for phoenix BIGINT type


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7aea6921
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7aea6921
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7aea6921

Branch: refs/heads/calcite
Commit: 7aea69215a007103f031d1e006e8b4057ec7ffc5
Parents: 5ea3607
Author: James Taylor <ja...@apache.org>
Authored: Mon Apr 6 14:07:57 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Mon Apr 6 14:09:19 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/ArrayIT.java     | 12 ++--
 .../end2end/ClientTimeArithmeticQueryIT.java    | 10 +--
 .../phoenix/end2end/CoalesceFunctionIT.java     |  2 +-
 .../org/apache/phoenix/end2end/NotQueryIT.java  |  8 +--
 phoenix-core/src/main/antlr3/PhoenixSQL.g       | 67 ++++----------------
 .../apache/phoenix/parse/LiteralParseNode.java  |  5 ++
 .../apache/phoenix/parse/ParseNodeFactory.java  | 42 ++++++++++++
 .../apache/phoenix/schema/types/PDataType.java  |  1 +
 .../apache/phoenix/schema/types/PDouble.java    |  1 -
 .../phoenix/compile/QueryCompilerTest.java      | 46 ++++++++++++++
 10 files changed, 121 insertions(+), 73 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
index ff8601b..d7dce54 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
@@ -463,7 +463,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
             props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
             conn = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT a_double_array[1]  FROM " + SIMPLE_TABLE_WITH_ARRAY
-                    + " WHERE a_double_array[2] = 89.96d or a_char_array[0] = 'a'";
+                    + " WHERE a_double_array[2] = 89.96 or a_char_array[0] = 'a'";
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -493,7 +493,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
             props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
             conn = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT a_double_array[1]  FROM " + SIMPLE_TABLE_WITH_ARRAY
-                    + " WHERE 89.96d = ANY(a_double_array)";
+                    + " WHERE CAST(89.96 AS DOUBLE) = ANY(a_double_array)";
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -522,7 +522,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
             props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
             conn = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT a_double_array[1]  FROM " + SIMPLE_TABLE_WITH_ARRAY
-                    + " WHERE 64.87d = ALL(a_double_array)";
+                    + " WHERE CAST(64.87 as DOUBLE) = ALL(a_double_array)";
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertFalse(rs.next());
@@ -546,7 +546,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
             props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
             conn = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT a_double_array[1]  FROM " + SIMPLE_TABLE_WITH_ARRAY
-                    + " WHERE  a_char_array[0] = 'f' or 89.96d > ANY(a_double_array)";
+                    + " WHERE  a_char_array[0] = 'f' or CAST(89.96 AS DOUBLE) > ANY(a_double_array)";
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -575,7 +575,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
             props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
             conn = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT a_double_array[1], a_double_array[2]  FROM " + SIMPLE_TABLE_WITH_ARRAY
-                    + " WHERE  a_char_array[0] = 'f' or 100.0d > ALL(a_double_array)";
+                    + " WHERE  a_char_array[0] = 'f' or CAST(100.0 AS DOUBLE) > ALL(a_double_array)";
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -1001,7 +1001,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
 		createTableWithArray(getUrl(),
 				getDefaultSplits(tenantId), null, ts - 2);
 		initTablesWithArrays(tenantId, null, ts, false, getUrl());
-		String query = "SELECT a_double_array FROM TABLE_WITH_ARRAY WHERE a_double_array = ARRAY [ 25.343d, 36.763d, 37.56d,386.63d]";
+		String query = "SELECT a_double_array FROM TABLE_WITH_ARRAY WHERE a_double_array = CAST(ARRAY [ 25.343, 36.763, 37.56,386.63] AS DOUBLE ARRAY)";
 		Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 		props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
 				Long.toString(ts + 2)); // Execute at timestamp 2

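A note on the test changes above: the grammar no longer accepts the 'd' type-suffix on literals (see the PhoenixSQL.g hunk later in this commit), so a double literal is now written either as a plain decimal, which types as DECIMAL, or with an explicit CAST where DOUBLE semantics are required. A minimal before/after sketch against a hypothetical DOUBLE column COL:

    // Illustration only, not part of the patch:
    String oldStyle = "SELECT * FROM T WHERE COL = 89.96d";                // no longer parses
    String newStyle = "SELECT * FROM T WHERE COL = CAST(89.96 AS DOUBLE)"; // explicit cast
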
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
index 17ed19d..00d835c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
@@ -72,7 +72,7 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
     
     @Test
     public void testDateAdd() throws Exception {
-        String query = "SELECT entity_id, b_string FROM ATABLE WHERE a_date + 0.5d < ?";
+        String query = "SELECT entity_id, b_string FROM ATABLE WHERE a_date + CAST(0.5 AS DOUBLE) < ?";
         String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
@@ -236,7 +236,7 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
     }
     @Test
     public void testDoubleSubtractionExpression() throws Exception {
-        String query = "SELECT entity_id FROM aTable where a_double - 0.0002d  < 0";
+        String query = "SELECT entity_id FROM aTable where a_double - CAST(0.0002 AS DOUBLE)  < 0";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -338,7 +338,7 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
     
     @Test
     public void testDoubleDivideExpression() throws Exception {
-        String query = "SELECT entity_id FROM aTable where a_double / 3.0d = 0.0003";
+        String query = "SELECT entity_id FROM aTable where a_double / CAST(3.0 AS DOUBLE) = 0.0003";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -455,7 +455,7 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
     
     @Test
     public void testDoubleMultiplyExpression() throws Exception {
-        String query = "SELECT entity_id FROM aTable where A_DOUBLE * 2.0d = 0.0002";
+        String query = "SELECT entity_id FROM aTable where A_DOUBLE * CAST(2.0 AS DOUBLE) = 0.0002";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -577,7 +577,7 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
     
     @Test
     public void testDateSubtract() throws Exception {
-        String query = "SELECT entity_id, b_string FROM ATABLE WHERE a_date - 0.5d > ?";
+        String query = "SELECT entity_id, b_string FROM ATABLE WHERE a_date - CAST(0.5 AS DOUBLE) > ?";
         String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
index 1ad647b..92a9376 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CoalesceFunctionIT.java
@@ -78,7 +78,7 @@ public class CoalesceFunctionIT extends BaseHBaseManagedTimeIT {
 
         ResultSet rs = conn.createStatement().executeQuery(
                 "SELECT "
-                + "COALESCE(SUM(COUNT), 0L) " //explicitly def long
+                + "COALESCE(SUM(COUNT), CAST(0 AS BIGINT)) " //explicitly def long
                 + "FROM TEST_COALESCE "
                 + "GROUP BY ID");
 

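The 'L' long suffix is removed in the same way, so a literal that must match SUM's BIGINT result type is widened with a CAST instead. A hedged sketch of the rewrite:

    // Illustration only:
    String oldStyle = "COALESCE(SUM(COUNT), 0L)";                // 'L' suffix no longer parses
    String newStyle = "COALESCE(SUM(COUNT), CAST(0 AS BIGINT))"; // explicit BIGINT cast
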
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/it/java/org/apache/phoenix/end2end/NotQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NotQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NotQueryIT.java
index 21074fc..7fd7979 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NotQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NotQueryIT.java
@@ -207,7 +207,7 @@ public class NotQueryIT extends BaseQueryIT {
     @Test
     public void testNotEqualsByFloat() throws Exception {
         String query = "SELECT a_float -- and here comment\n" + 
-        "FROM aTable WHERE organization_id=? and a_float != 0.01d and a_float <= 0.02d";
+        "FROM aTable WHERE organization_id=? and a_float != CAST(0.01 AS FLOAT) and a_float <= CAST(0.02 AS FLOAT)";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -226,7 +226,7 @@ public class NotQueryIT extends BaseQueryIT {
     @Test
     public void testNotEqualsByUnsignedFloat() throws Exception {
         String query = "SELECT a_unsigned_float -- and here comment\n" + 
-        "FROM aTable WHERE organization_id=? and a_unsigned_float != 0.01d and a_unsigned_float <= 0.02d";
+        "FROM aTable WHERE organization_id=? and a_unsigned_float != 0.01 and a_unsigned_float <= 0.02";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -245,7 +245,7 @@ public class NotQueryIT extends BaseQueryIT {
     @Test
     public void testNotEqualsByDouble() throws Exception {
         String query = "SELECT a_double -- and here comment\n" + 
-        "FROM aTable WHERE organization_id=? and a_double != 0.0001d and a_double <= 0.0002d";
+        "FROM aTable WHERE organization_id=? and a_double != CAST(0.0001 AS DOUBLE) and a_double <= CAST(0.0002 AS DOUBLE)";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);
@@ -264,7 +264,7 @@ public class NotQueryIT extends BaseQueryIT {
     @Test
     public void testNotEqualsByUnsignedDouble() throws Exception {
         String query = "SELECT a_unsigned_double -- and here comment\n" + 
-        "FROM aTable WHERE organization_id=? and a_unsigned_double != 0.0001d and a_unsigned_double <= 0.0002d";
+        "FROM aTable WHERE organization_id=? and a_unsigned_double != 0.0001 and a_unsigned_double <= 0.0002";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
         Connection conn = DriverManager.getConnection(getUrl(), props);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 03ec9f5..295bd79 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -425,7 +425,7 @@ create_sequence_node returns [CreateSequenceStatement ret]
     ;
 
 int_literal_or_bind returns [ParseNode ret]
-    : n=int_literal { $ret = n; }
+    : n=int_or_long_literal { $ret = n; }
     | b=bind_expression { $ret = b; }
     ;
 
@@ -630,7 +630,7 @@ delete_node returns [DeleteStatement ret]
 
 limit returns [LimitNode ret]
     : b=bind_expression { $ret = factory.limit(b); }
-    | l=int_literal { $ret = factory.limit(l); }
+    | l=int_or_long_literal { $ret = factory.limit(l); }
     ;
     
 sampling_rate returns [LiteralParseNode ret]
@@ -894,18 +894,14 @@ literal_or_bind returns [ParseNode ret]
 
 // Get a string, integer, double, date, boolean, or NULL value.
 literal returns [LiteralParseNode ret]
-    :   t=STRING_LITERAL {
-            ret = factory.literal(t.getText()); 
+    :   s=STRING_LITERAL {
+            ret = factory.literal(s.getText()); 
         }
-    |   l=int_literal { ret = l; }
-    |   l=long_literal { ret = l; }
-    |   l=double_literal { ret = l; }
-    |   t=DECIMAL {
-            try {
-                ret = factory.literal(new BigDecimal(t.getText()));
-            } catch (NumberFormatException e) { // Shouldn't happen since we just parsed a decimal
-                throwRecognitionException(t);
-            }
+    |   n=NUMBER {
+            ret = factory.wholeNumber(n.getText());
+        }
+    |   d=DECIMAL  {
+            ret = factory.realNumber(d.getText());
         }
     |   NULL {ret = factory.literal(null);}
     |   TRUE {ret = factory.literal(Boolean.TRUE);} 
@@ -919,42 +915,9 @@ literal returns [LiteralParseNode ret]
         }
     ;
     
-int_literal returns [LiteralParseNode ret]
+int_or_long_literal returns [LiteralParseNode ret]
     :   n=NUMBER {
-            try {
-                Long v = Long.valueOf(n.getText());
-                if (v >= Integer.MIN_VALUE && v <= Integer.MAX_VALUE) {
-                    ret = factory.literal(v.intValue());
-                } else {
-                    ret = factory.literal(v);
-                }
-            } catch (NumberFormatException e) { // Shouldn't happen since we just parsed a number
-                throwRecognitionException(n);
-            }
-        }
-    ;
-
-long_literal returns [LiteralParseNode ret]
-    :   l=LONG {
-            try {
-                String lt = l.getText();
-                Long v = Long.valueOf(lt.substring(0, lt.length() - 1));
-                ret = factory.literal(v);
-            } catch (NumberFormatException e) { // Shouldn't happen since we just parsed a number
-                throwRecognitionException(l);
-            }
-        }
-    ;
-
-double_literal returns [LiteralParseNode ret]
-    :   d=DOUBLE {
-            try {
-                String dt = d.getText();
-                Double v = Double.valueOf(dt.substring(0, dt.length() - 1));
-                ret = factory.literal(v);
-            } catch (NumberFormatException e) { // Shouldn't happen since we just parsed a number
-                throwRecognitionException(d);
-            }
+            ret = factory.intOrLong(n.getText());
         }
     ;
 
@@ -1004,19 +967,11 @@ NUMBER
     :   POSINTEGER
     ;
 
-LONG
-    :   POSINTEGER ('L'|'l')
-    ;
-
 // Exponential format is not supported.
 DECIMAL
     :   POSINTEGER? '.' POSINTEGER
     ;
 
-DOUBLE
-    :   DECIMAL ('D'|'d')
-    ;
-
 DOUBLE_QUOTE
     :   '"'
     ;

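With the LONG and DOUBLE lexer rules removed, every numeric token is either NUMBER (a whole number) or DECIMAL, and the typing decision moves into ParseNodeFactory (see the next hunk). Reading the new factory methods, the boundary behavior would be roughly:

    // Hedged walk-through of the new literal typing (values illustrative):
    // "2147483647"          -> NUMBER  -> wholeNumber() -> Integer literal (fits in int)
    // "2147483648"          -> NUMBER  -> wholeNumber() -> Long literal (fits in long)
    // "9223372036854775808" -> NUMBER  -> wholeNumber() -> BigDecimal literal (> Long.MAX_VALUE)
    // "89.96"               -> DECIMAL -> realNumber()  -> BigDecimal literal
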
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
index e0e8c3b..85f4ee5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.parse;
 
+import java.math.BigDecimal;
 import java.sql.SQLException;
 import java.util.Collections;
 import java.util.List;
@@ -39,6 +40,10 @@ public class LiteralParseNode extends TerminalParseNode {
     public static final ParseNode ONE = new LiteralParseNode(1);
     public static final ParseNode MINUS_ONE = new LiteralParseNode(-1L);
     public static final ParseNode TRUE = new LiteralParseNode(true);
+    // Parser representation of Long.MIN_VALUE, as ABS(Long.MIN_VALUE) is too big to fit into a Long
+    public static final ParseNode MIN_LONG_AS_BIG_DECIMAL = new LiteralParseNode(BigDecimal.valueOf(Long.MIN_VALUE).abs());
+    // See ParseNodeFactory.negate(), as MIN_LONG_AS_BIG_DECIMAL will be converted to MIN_LONG if negated.
+    public static final ParseNode MIN_LONG = new LiteralParseNode(Long.MIN_VALUE);
     
     private final Object value;
     private final PDataType type;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 5aba933..0f5074e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.parse;
 
 import java.lang.reflect.Constructor;
+import java.math.BigDecimal;
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
 import java.util.Arrays;
@@ -73,6 +74,8 @@ public class ParseNodeFactory {
         AvgAggregateFunction.class
         );
     private static final Map<BuiltInFunctionKey, BuiltInFunctionInfo> BUILT_IN_FUNCTION_MAP = Maps.newHashMap();
+    private static final BigDecimal MAX_LONG = BigDecimal.valueOf(Long.MAX_VALUE);
+
 
     /**
      *
@@ -455,6 +458,39 @@ public class ParseNodeFactory {
         return new LiteralParseNode(value);
     }
 
+    public LiteralParseNode realNumber(String text) {
+        return new LiteralParseNode(new BigDecimal(text, PDataType.DEFAULT_MATH_CONTEXT));
+    }
+    
+    public LiteralParseNode wholeNumber(String text) {
+        int length = text.length();
+        // We know it'll fit into long, might still fit into int
+        if (length <= PDataType.LONG_PRECISION-1) {
+            long l = Long.parseLong(text);
+            if (l <= Integer.MAX_VALUE) {
+                // Fits into int
+                return new LiteralParseNode((int)l);
+            }
+            return new LiteralParseNode(l);
+        }
+        // Might still fit into long
+        BigDecimal d = new BigDecimal(text, PDataType.DEFAULT_MATH_CONTEXT);
+        if (d.compareTo(MAX_LONG) <= 0) {
+            return new LiteralParseNode(d.longValueExact());
+        }
+        // Doesn't fit into long
+        return new LiteralParseNode(d);
+    }
+
+    public LiteralParseNode intOrLong(String text) {
+        long l = Long.parseLong(text);
+        if (l <= Integer.MAX_VALUE) {
+            // Fits into int
+            return new LiteralParseNode((int)l);
+        }
+        return new LiteralParseNode(l);
+    }
+
     public CastParseNode cast(ParseNode expression, String dataType, Integer maxLength, Integer scale) {
         return new CastParseNode(expression, dataType, maxLength, scale, false);
     }
@@ -587,6 +623,12 @@ public class ParseNodeFactory {
                 PLong.INSTANCE)) {
             return LiteralParseNode.MINUS_ONE;
         }
+        // Special case to convert Long.MIN_VALUE back to a Long. We can't initially represent it
+        // as a Long in the parser because we only represent positive values as constants in the
+        // parser, and ABS(Long.MIN_VALUE) is too big to fit into a Long. So we convert it back here.
+        if (LiteralParseNode.MIN_LONG_AS_BIG_DECIMAL.equals(child)) {
+            return LiteralParseNode.MIN_LONG;
+        }
         return new MultiplyParseNode(Arrays.asList(child,LiteralParseNode.MINUS_ONE));
     }
 

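The negate() special case is easiest to see with the most negative long. A hedged trace of parsing "-9223372036854775808":

    // The lexer only ever produces the positive text "9223372036854775808", which
    // exceeds Long.MAX_VALUE, so wholeNumber() yields the BigDecimal literal
    // MIN_LONG_AS_BIG_DECIMAL. The enclosing unary minus then calls negate(),
    // which recognizes that exact literal and returns LiteralParseNode.MIN_LONG
    // (a true Long) instead of wrapping a BigDecimal in a MultiplyParseNode.
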
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
index 8f46a3b..48b215f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
@@ -503,6 +503,7 @@ public abstract class PDataType<T> implements DataType<T>, Comparable<PDataType<
   public final static Integer LONG_PRECISION = 19;
   public final static Integer SHORT_PRECISION = 5;
   public final static Integer BYTE_PRECISION = 3;
+  public final static Integer DOUBLE_PRECISION = 15;
 
   public static final int ARRAY_TYPE_BASE = 3000;
   public static final String ARRAY_TYPE_SUFFIX = "ARRAY";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDouble.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDouble.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDouble.java
index e0648f2..d11aedf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDouble.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDouble.java
@@ -27,7 +27,6 @@ import com.google.common.base.Preconditions;
 import com.google.common.primitives.Doubles;
 
 public class PDouble extends PRealNumber<Double> {
-
   public static final PDouble INSTANCE = new PDouble();
 
   private PDouble() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aea6921/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 83c984b..77c1f9e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -42,11 +43,14 @@ import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.aggregator.Aggregator;
 import org.apache.phoenix.expression.aggregator.CountAggregator;
 import org.apache.phoenix.expression.aggregator.ServerAggregators;
 import org.apache.phoenix.expression.function.TimeUnit;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.query.QueryConstants;
@@ -1584,5 +1588,47 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '.\\\\d\\\\D\\\\s\\\\S\\\\w\\\\W') = 'val'");
     }
     
+    private static void assertLiteralEquals(Object o, RowProjector p, int i) {
+        assertTrue(i < p.getColumnCount());
+        Expression e = p.getColumnProjector(i).getExpression();
+        assertTrue(e instanceof LiteralExpression);
+        LiteralExpression l = (LiteralExpression)e;
+        Object lo = l.getValue();
+        assertEquals(o, lo);
+    }
+    
+    @Test
+    public void testIntAndLongMinValue() throws Exception {
+        BigDecimal oneLessThanMinLong = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE);
+        BigDecimal oneMoreThanMaxLong = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE);
+        String query = "SELECT " + 
+            Integer.MIN_VALUE + "," + Long.MIN_VALUE + "," + 
+            (Integer.MIN_VALUE+1) + "," + (Long.MIN_VALUE+1) + "," + 
+            ((long)Integer.MIN_VALUE - 1) + "," + oneLessThanMinLong + "," +
+            Integer.MAX_VALUE + "," + Long.MAX_VALUE + "," +
+            (Integer.MAX_VALUE - 1) + "," + (Long.MAX_VALUE - 1) + "," +
+            ((long)Integer.MAX_VALUE + 1) + "," + oneMoreThanMaxLong +
+        " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " LIMIT 1";
+        List<Object> binds = Collections.emptyList();
+        QueryPlan plan = getQueryPlan(query, binds);
+        RowProjector p = plan.getProjector();
+        // Negative integers end up as longs once the * -1 occurs
+        assertLiteralEquals((long)Integer.MIN_VALUE, p, 0);
+        // Min long still stays as long
+        assertLiteralEquals(Long.MIN_VALUE, p, 1);
+        assertLiteralEquals((long)Integer.MIN_VALUE + 1, p, 2);
+        assertLiteralEquals(Long.MIN_VALUE + 1, p, 3);
+        assertLiteralEquals((long)Integer.MIN_VALUE - 1, p, 4);
+        // Can't fit into long, so becomes BigDecimal
+        assertLiteralEquals(oneLessThanMinLong, p, 5);
+        // Positive integers stay as ints
+        assertLiteralEquals(Integer.MAX_VALUE, p, 6);
+        assertLiteralEquals(Long.MAX_VALUE, p, 7);
+        assertLiteralEquals(Integer.MAX_VALUE - 1, p, 8);
+        assertLiteralEquals(Long.MAX_VALUE - 1, p, 9);
+        assertLiteralEquals((long)Integer.MAX_VALUE + 1, p, 10);
+        assertLiteralEquals(oneMoreThanMaxLong, p, 11);
+    }
+
    
 }


[14/50] [abbrv] phoenix git commit: PHOENIX-1800 Fix failing test case SaltedViewIT#testSaltedUpdatableViewWithLocalIndex(Rajeshbabu Chintaguntla)

Posted by ma...@apache.org.
PHOENIX-1800 Fix failing test case SaltedViewIT#testSaltedUpdatableViewWithLocalIndex(Rajeshbabu Chintaguntla)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1e280617
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1e280617
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1e280617

Branch: refs/heads/calcite
Commit: 1e280617c25f31658ab7c5a68de62438458a94e5
Parents: 13d6296
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Apr 2 19:26:51 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Apr 2 19:26:51 2015 +0530

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/util/IndexUtil.java     | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1e280617/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 10ca305..ca25348 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -483,12 +483,12 @@ public class IndexUtil {
             if (dataRegion != null) {
                 joinResult = dataRegion.get(get);
             } else {
-                TableName indexTable =
-                        TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(c.getEnvironment()
-                                .getRegion().getTableDesc().getName()));
+                TableName dataTable =
+                        TableName.valueOf(MetaDataUtil.getUserTableName(c.getEnvironment()
+                                .getRegion().getTableDesc().getNameAsString()));
                 HTableInterface table = null;
                 try {
-                    table = c.getEnvironment().getTable(indexTable);
+                    table = c.getEnvironment().getTable(dataTable);
                     joinResult = table.get(get);
                 } finally {
                     if (table != null) table.close();

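The bug was one of direction: this coprocessor hook already runs on the local index region, so resolving the local index physical name pointed back at the region it was running on. What the join needs is the user data table. A hedged sketch of the intended name round-trip (the prefix shown is illustrative; the exact constant lives in MetaDataUtil):

    // local index physical name, e.g. "_LOCAL_IDX_MY_TABLE"
    //     -> MetaDataUtil.getUserTableName(...) -> "MY_TABLE" (the data table to read)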

[42/50] [abbrv] phoenix git commit: PHOENIX-1287 Use the joni byte[] regex engine in place of j.u.regex (Shuxiong Ye)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java
new file mode 100644
index 0000000..a975550
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSubstrFunction;
+import org.apache.phoenix.expression.function.RegexpSubstrFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSubstrFunction;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+/**
+ * Parse node corresponding to {@link RegexpSubstrFunction}. It also acts as a factory for creating
+ * the right kind of RegexpSubstrFunction according to setting in
+ * QueryServices.USE_BYTE_BASED_REGEX_ATTRIB
+ */
+public class RegexpSubstrParseNode extends FunctionParseNode {
+
+    RegexpSubstrParseNode(String name, List<ParseNode> children, BuiltInFunctionInfo info) {
+        super(name, children, info);
+    }
+
+    @Override
+    public Expression create(List<Expression> children, StatementContext context)
+            throws SQLException {
+        QueryServices services = context.getConnection().getQueryServices();
+        boolean useByteBasedRegex =
+                services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB,
+                    QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX);
+        if (useByteBasedRegex) {
+            return new ByteBasedRegexpSubstrFunction(children);
+        } else {
+            return new StringBasedRegexpSubstrFunction(children);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index adf146d..3c6a6c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -50,6 +50,8 @@ public interface QueryServices extends SQLCloseable {
     public static final String AUTO_COMMIT_ATTRIB = "phoenix.connection.autoCommit";
     // consistency configuration setting
     public static final String CONSISTENCY_ATTRIB = "phoenix.connection.consistency";
+    // joni byte regex engine setting
+    public static final String USE_BYTE_BASED_REGEX_ATTRIB = "phoenix.regex.byteBased";
 
     /**
 	 * max size to spool the result into

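The new attribute is a plain configuration key, so under the usual Phoenix property plumbing it should be settable per connection as well as in hbase-site.xml. A hedged usage sketch:

    // Illustration only: fall back to the String-based java.util.regex engine.
    // import java.sql.Connection; import java.sql.DriverManager; import java.util.Properties;
    Properties props = new Properties();
    props.setProperty("phoenix.regex.byteBased", "false"); // USE_BYTE_BASED_REGEX_ATTRIB
    Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
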
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 884b820..5cc4fa7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -61,6 +61,7 @@ import static org.apache.phoenix.query.QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB
 import static org.apache.phoenix.query.QueryServices.STATS_USE_CURRENT_TIME_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.THREAD_POOL_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.THREAD_TIMEOUT_MS_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.USE_BYTE_BASED_REGEX_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB;
 
 import java.util.Map.Entry;
@@ -68,10 +69,8 @@ import java.util.Map.Entry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.client.Consistency;
-import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.trace.util.Tracing;
@@ -194,6 +193,8 @@ public class QueryServicesOptions {
     
     public static final String DEFAULT_CONSISTENCY_LEVEL = Consistency.STRONG.toString();
 
+    public static final boolean DEFAULT_USE_BYTE_BASED_REGEX = true;
+
     private final Configuration config;
 
     private QueryServicesOptions(Configuration config) {
@@ -247,6 +248,7 @@ public class QueryServicesOptions {
             .setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK)
             .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED)
             .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY)
+            .setIfUnset(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX)
             ;
         // HBase sets this to 1, so we reset it to something more appropriate.
         // Hopefully HBase will change this, because we can't know if a user set
@@ -450,6 +452,10 @@ public class QueryServicesOptions {
         return config.getBoolean(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED);
     }
     
+    public boolean isUseByteBasedRegex() {
+        return config.getBoolean(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX);
+    }
+
     public QueryServicesOptions setMaxServerCacheTTLMs(int ttl) {
         return set(MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, ttl);
     }
@@ -524,4 +530,8 @@ public class QueryServicesOptions {
         return this;
     }
 
+    public QueryServicesOptions setUseByteBasedRegex(boolean flag) {
+        config.setBoolean(USE_BYTE_BASED_REGEX_ATTRIB, flag);
+        return this;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index b6dce34..c6861f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -21,6 +21,8 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.text.Format;
+import java.util.LinkedList;
+import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -755,4 +757,93 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
         buf.append(']');
         return buf.toString();
     }
+
+    static public class PArrayDataTypeBytesArrayBuilder<T> {
+        static private final int BYTE_ARRAY_DEFAULT_SIZE = 128;
+
+        private PDataType baseType;
+        private SortOrder sortOrder;
+        private List<Integer> offsetPos;
+        private TrustedByteArrayOutputStream byteStream;
+        private DataOutputStream oStream;
+        private int nulls;
+
+        public PArrayDataTypeBytesArrayBuilder(PDataType baseType, SortOrder sortOrder) {
+            this.baseType = baseType;
+            this.sortOrder = sortOrder;
+            offsetPos = new LinkedList<Integer>();
+            byteStream = new TrustedByteArrayOutputStream(BYTE_ARRAY_DEFAULT_SIZE);
+            oStream = new DataOutputStream(byteStream);
+            nulls = 0;
+        }
+
+        private void close() {
+            try {
+                if (byteStream != null) byteStream.close();
+                if (oStream != null) oStream.close();
+                byteStream = null;
+                oStream = null;
+            } catch (IOException ioe) { // ignored: closing in-memory streams should not fail
+            }
+        }
+
+        public boolean appendElem(byte[] bytes) {
+            return appendElem(bytes, 0, bytes.length);
+        }
+
+        public boolean appendElem(byte[] bytes, int offset, int len) {
+            if (oStream == null || byteStream == null) return false;
+            try {
+                if (!baseType.isFixedWidth()) {
+                    if (len == 0) {
+                        offsetPos.add(byteStream.size());
+                        nulls++;
+                    } else {
+                        nulls = serializeNulls(oStream, nulls);
+                        offsetPos.add(byteStream.size());
+                        if (sortOrder == SortOrder.DESC) {
+                            SortOrder.invert(bytes, offset, bytes, offset, len);
+                        }
+                        oStream.write(bytes, offset, len);
+                        oStream.write(QueryConstants.SEPARATOR_BYTE);
+                    }
+                } else {
+                    if (sortOrder == SortOrder.DESC) {
+                        SortOrder.invert(bytes, offset, bytes, offset, len);
+                    }
+                    oStream.write(bytes, offset, len);
+                }
+                return true;
+            } catch (IOException e) { // swallowed: failure is signaled by returning false
+            }
+            return false;
+        }
+
+        public byte[] getBytesAndClose() {
+            try {
+                if (!baseType.isFixedWidth()) {
+                    int noOfElements = offsetPos.size();
+                    int[] offsetPosArray = new int[noOfElements];
+                    int index = 0;
+                    for (Integer i : offsetPos) {
+                        offsetPosArray[index] = i;
+                        ++index;
+                    }
+                    PArrayDataType.writeEndSeperatorForVarLengthArray(oStream);
+                    noOfElements =
+                            PArrayDataType.serailizeOffsetArrayIntoStream(oStream, byteStream,
+                                noOfElements, offsetPosArray[offsetPosArray.length - 1],
+                                offsetPosArray);
+                    serializeHeaderInfoIntoStream(oStream, noOfElements);
+                }
+                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+                ptr.set(byteStream.getBuffer(), 0, byteStream.size());
+                return ByteUtil.copyKeyBytesIfNecessary(ptr);
+            } catch (IOException e) { // swallowed: failure is signaled by returning null
+            } finally {
+                close();
+            }
+            return null;
+        }
+    }
 }

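A hedged usage sketch of the new builder (types are from the patch above; element values are illustrative). Note that getBytesAndClose() returns null on serialization failure, so callers need a null check:

    PArrayDataType.PArrayDataTypeBytesArrayBuilder builder =
            new PArrayDataType.PArrayDataTypeBytesArrayBuilder(PVarchar.INSTANCE, SortOrder.ASC);
    builder.appendElem(PVarchar.INSTANCE.toBytes("ONE"));
    builder.appendElem(PVarchar.INSTANCE.toBytes("TWO"));
    byte[] arrayBytes = builder.getBytesAndClose(); // serialized VARCHAR ARRAY bytes, or null
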
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
index 4a7ae38..89ae43b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/StringUtil.java
@@ -115,7 +115,13 @@ public class StringUtil {
     }
 
     public static int getBytesInChar(byte b, SortOrder sortOrder) {
-    	Preconditions.checkNotNull(sortOrder);
+        int ret = getBytesInCharNoException(b, sortOrder);
+        if (ret == -1) throw new UndecodableByteException(b);
+        return ret;
+    }
+
+    private static int getBytesInCharNoException(byte b, SortOrder sortOrder) {
+        Preconditions.checkNotNull(sortOrder);
         if (sortOrder == SortOrder.DESC) {
             b = SortOrder.invert(b);
         }
@@ -128,8 +134,7 @@ public class StringUtil {
             return 3;
         if ((c & BYTES_4_MASK) == 0xF0)
             return 4;
-        // Any thing else in the first byte is invalid
-        throw new UndecodableByteException(b);
+        return -1;
     }
 
     public static int calculateUTF8Length(byte[] bytes, int offset, int length, SortOrder sortOrder) {
@@ -143,6 +148,63 @@ public class StringUtil {
         return length;
     }
 
+    // Given an array of bytes containing UTF-8 encoded strings, starting from curPos and ending
+    // before range, return the offset of the next character, or -1 if no next character is
+    // available or the leading byte is undecodable
+    private static int calculateNextCharOffset(byte[] bytes, int curPos, int range,
+            SortOrder sortOrder) {
+        int ret = getBytesInCharNoException(bytes[curPos], sortOrder);
+        if (ret == -1) return -1;
+        ret += curPos;
+        if (ret >= range) return -1;
+        return ret;
+    }
+
+    // Given an array of bytes containing UTF-8 encoded strings, return the offset of the
+    // previous character, or -1 if the leading byte is undecodable. curPos points to the
+    // starting offset of the current character; offset is the lower bound of the search.
+    private static int calculatePreCharOffset(byte[] bytes, int curPos, int offset,
+            SortOrder sortOrder) {
+        --curPos;
+        for (int i = 1, pos = curPos - i + 1; i <= 4 && offset <= pos; ++i, --pos) {
+            int ret = getBytesInCharNoException(bytes[pos], sortOrder);
+            if (ret == i) return pos;
+        }
+        return -1;
+    }
+
+    // Return the actual offsetInBytes corresponding to offsetInStr within an array of bytes
+    // containing UTF-8 encoded strings
+    // @param bytes an array of bytes containing UTF-8 encoded strings
+    // @param offset starting offset of the encoded strings within bytes
+    // @param length length in bytes of the encoded strings
+    // @param sortOrder sort order the bytes were written with
+    // @param offsetInStr character offset into the encoded strings; may be negative,
+    // meaning counting from the end of the encoded strings
+    // @return the byte offset corresponding to offsetInStr, or -1 if offsetInStr is out of range
+    public static int calculateUTF8Offset(byte[] bytes, int offset, int length,
+            SortOrder sortOrder, int offsetInStr) {
+        if (offsetInStr == 0) return offset;
+        int ret, range = offset + length;
+        if (offsetInStr > 0) {
+            ret = offset;
+            while (offsetInStr > 0) {
+                ret = calculateNextCharOffset(bytes, ret, range, sortOrder);
+                if (ret == -1) return -1;
+                --offsetInStr;
+            }
+        } else {
+            ret = offset + length;
+            while (offsetInStr < 0) {
+                ret = calculatePreCharOffset(bytes, ret, offset, sortOrder);
+                // if calculatePreCharOffset returns -1, ret must be smaller than offset
+                if (ret < offset) return -1;
+                ++offsetInStr;
+            }
+        }
+        return ret;
+    }
+
     // Given an array of bytes containing encoding utf-8 encoded strings, the offset and a length
     // parameter, return the actual index into the byte array which would represent a substring
     // of <length> starting from the character at <offset>. We assume the <offset> is the start

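The sign convention of offsetInStr is the interesting part: a non-negative value counts characters from the start, a negative value counts from the end. A small worked example, assuming ASC-ordered bytes:

    // "héllo" encodes as h(1 byte) é(2) l(1) l(1) o(1) -> character byte offsets 0, 1, 3, 4, 5
    byte[] utf8 = "h\u00e9llo".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    StringUtil.calculateUTF8Offset(utf8, 0, utf8.length, SortOrder.ASC, 0);  // -> 0 ('h')
    StringUtil.calculateUTF8Offset(utf8, 0, utf8.length, SortOrder.ASC, 2);  // -> 3 (first 'l')
    StringUtil.calculateUTF8Offset(utf8, 0, utf8.length, SortOrder.ASC, -1); // -> 5 ('o')
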
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index 94b25d0..f40afc3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -731,7 +731,8 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
         assertEquals(
                 rowKeyFilter(like(
                     ENTITY_ID,
-                    likeArg)),
+                    likeArg,
+                    context)),
                 filter);
 
         byte[] startRow = ByteUtil.concat(
@@ -757,7 +758,8 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
         assertEquals(
                 rowKeyFilter(like(
                     ENTITY_ID,
-                    likeArg)),
+                    likeArg,
+                    context)),
                 filter);
 
         byte[] startRow = ByteUtil.concat(
@@ -783,7 +785,8 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
         assertEquals(
                 rowKeyFilter(like(
                     substr(ENTITY_ID,1,10),
-                    likeArg)),
+                    likeArg,
+                    context)),
                 filter);
 
         byte[] startRow = ByteUtil.concat(
@@ -809,7 +812,8 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
         assertEquals(
                 rowKeyFilter(like(
                     substr(ENTITY_ID,4,10),
-                    likeArg)),
+                    likeArg,
+                    context)),
                 filter);
 
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);
@@ -832,7 +836,8 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
         assertEquals(
                 rowKeyFilter(like(
                     ENTITY_ID,
-                    likeArg)),
+                    likeArg,
+                    context)),
                 filter);
 
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);
@@ -855,7 +860,8 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
         assertEquals(
                 rowKeyFilter(not(like(
                     ENTITY_ID,
-                    likeArg))),
+                    likeArg,
+                    context))),
                 filter);
 
         byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java
index 3033edf..e66ad13 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java
@@ -20,24 +20,40 @@ package org.apache.phoenix.expression;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.parse.LikeParseNode.LikeType;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.junit.Test;
 
 public class ILikeExpressionTest {
-    public boolean testExpression (String value, String expression) {
-      LiteralExpression v = LiteralExpression.newConstant(value);
-      LiteralExpression p = LiteralExpression.newConstant(expression);
+    private boolean testExpression (String value, String expression, SortOrder sortorder)
+            throws SQLException {
+      LiteralExpression v = LiteralExpression.newConstant(value, PVarchar.INSTANCE, sortorder);
+      LiteralExpression p = LiteralExpression.newConstant(expression, PVarchar.INSTANCE, sortorder);
       List<Expression> children = Arrays.<Expression>asList(v,p);
-      LikeExpression e = LikeExpression.create(children, LikeType.CASE_INSENSITIVE);
+      LikeExpression e1 = ByteBasedLikeExpression.create(children, LikeType.CASE_INSENSITIVE);
+      LikeExpression e2 = StringBasedLikeExpression.create(children, LikeType.CASE_INSENSITIVE);
       ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-      boolean evaluated = e.evaluate(null, ptr);
-      Boolean result = (Boolean)e.getDataType().toObject(ptr);
-      assertTrue(evaluated);
-      return result;
+      boolean evaluated1 = e1.evaluate(null, ptr);
+      Boolean result1 = (Boolean)e1.getDataType().toObject(ptr);
+      assertTrue(evaluated1);
+      boolean evaluated2 = e2.evaluate(null, ptr);
+      Boolean result2 = (Boolean)e2.getDataType().toObject(ptr);
+      assertTrue(evaluated2);
+      assertEquals(result1, result2);
+      return result1;
+    }
+
+    private boolean testExpression(String value, String expression) throws SQLException {
+        boolean result1 = testExpression(value, expression, SortOrder.ASC);
+        boolean result2 = testExpression(value, expression, SortOrder.DESC);
+        assertEquals(result1, result2);
+        return result1;
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java
index 27e6547..0bf8b06 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java
@@ -20,25 +20,42 @@ package org.apache.phoenix.expression;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.parse.LikeParseNode.LikeType;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.junit.Test;
 
 public class LikeExpressionTest {
-    public boolean testExpression (String value, String expression) {
-      LiteralExpression v = LiteralExpression.newConstant(value);
-      LiteralExpression p = LiteralExpression.newConstant(expression);
+    private boolean testExpression(String value, String expression, SortOrder sortorder)
+            throws SQLException {
+      LiteralExpression v = LiteralExpression.newConstant(value, PVarchar.INSTANCE, sortorder);
+      LiteralExpression p = LiteralExpression.newConstant(expression, PVarchar.INSTANCE, sortorder);
       List<Expression> children = Arrays.<Expression>asList(v,p);
-      LikeExpression e = LikeExpression.create(children, LikeType.CASE_SENSITIVE);
+      LikeExpression e1 = ByteBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE);
+      LikeExpression e2 = StringBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE);
       ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-      boolean evaluated = e.evaluate(null, ptr);
-      Boolean result = (Boolean)e.getDataType().toObject(ptr);
-      assertTrue(evaluated);
-      return result;
+      boolean evaluated1 = e1.evaluate(null, ptr);
+      Boolean result1 = (Boolean)e1.getDataType().toObject(ptr);
+      assertTrue(evaluated1);
+      boolean evaluated2 = e2.evaluate(null, ptr);
+      Boolean result2 = (Boolean)e2.getDataType().toObject(ptr);
+      assertTrue(evaluated2);
+      assertEquals(result1, result2);
+      return result1;
     }
+
+    private boolean testExpression(String value, String expression) throws SQLException {
+        boolean result1 = testExpression(value, expression, SortOrder.ASC);
+        boolean result2 = testExpression(value, expression, SortOrder.DESC);
+        assertEquals(result1, result2);
+        return result1;
+    }
+
     @Test
     public void testStartWildcard() throws Exception {
         assertEquals(Boolean.FALSE, testExpression ("149na7-app1-2-", "%-w"));
@@ -58,4 +75,10 @@ public class LikeExpressionTest {
         assertEquals(Boolean.TRUE, testExpression ("test", "%s%"));
         assertEquals(Boolean.FALSE, testExpression ("test", "%S%"));
     }
+
+    @Test
+    public void testEmptySourceStr() throws Exception {
+        assertEquals(Boolean.TRUE, testExpression ("", "%"));
+        assertEquals(Boolean.FALSE, testExpression ("", "_"));
+    }
  }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java
new file mode 100644
index 0000000..ad11c1b
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ByteBasedRegexpReplaceFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpReplaceFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class RegexpReplaceFunctionTest {
+    private final static PVarchar TYPE = PVarchar.INSTANCE;
+
+    private String evalExp(Expression exp) {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        boolean eval = exp.evaluate(null, ptr);
+        assertTrue(eval);
+        String res = (String) exp.getDataType().toObject(ptr);
+        return res;
+    }
+
+    private String testExpression(String srcStr, String patternStr, String replaceStr,
+            SortOrder sortOrder) throws SQLException {
+        Expression srcExp, patternExp, replaceExp;
+        srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder);
+        patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder);
+        replaceExp = LiteralExpression.newConstant(replaceStr, TYPE, sortOrder);
+        List<Expression> expressions = Lists.newArrayList(srcExp, patternExp, replaceExp);
+        String res1, res2;
+        res1 = evalExp(new ByteBasedRegexpReplaceFunction(expressions));
+        res2 = evalExp(new StringBasedRegexpReplaceFunction(expressions));
+        assertEquals(res1, res2);
+        return res1;
+    }
+
+    private String testExpression(String srcStr, String patternStr, String replaceStr)
+            throws SQLException {
+        String result1 = testExpression(srcStr, patternStr, replaceStr, SortOrder.ASC);
+        String result2 = testExpression(srcStr, patternStr, replaceStr, SortOrder.DESC);
+        assertEquals(result1, result2);
+        return result1;
+    }
+
+    private void testExpression(String srcStr, String patternStr, String replaceStr,
+            String expectedStr) throws SQLException {
+        String result = testExpression(srcStr, patternStr, replaceStr);
+        assertEquals(expectedStr, result);
+    }
+
+    @Test
+    public void test() throws Exception {
+        testExpression("aa11bb22cc33dd44ee", "[0-9]+", "*", "aa*bb*cc*dd*ee");
+        testExpression("aa11bb22cc33dd44ee", "[0-9]+", "", "aabbccddee");
+        testExpression("aa11bb22cc33dd44ee", "[a-z][0-9]", "", "a1b2c3d4ee");
+        testExpression("aa11bb22cc33dd44ee", "[a-z0-9]+", "", (String) null);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java
new file mode 100644
index 0000000..6157ce0
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSplitFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSplitFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class RegexpSplitFunctionTest {
+    private final static PVarchar TYPE = PVarchar.INSTANCE;
+
+    private String[] evalExp(Expression exp) throws SQLException {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        boolean eval = exp.evaluate(null, ptr);
+        assertTrue(eval);
+        PhoenixArray evalRes = (PhoenixArray) exp.getDataType().toObject(ptr);
+        String[] res = (String[]) evalRes.getArray();
+        return res;
+    }
+
+    private String[] testExpression(String srcStr, String patternStr, SortOrder sortOrder)
+            throws SQLException {
+        Expression srcExp, patternExp;
+        srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder);
+        patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder);
+        List<Expression> expressions = Lists.newArrayList(srcExp, patternExp);
+        String[] res1, res2;
+        res1 = evalExp(new ByteBasedRegexpSplitFunction(expressions));
+        res2 = evalExp(new StringBasedRegexpSplitFunction(expressions));
+        testEqual(res2, res1);
+        return res1;
+    }
+
+    private String[] testExpression(String srcStr, String patternStr) throws SQLException {
+        String[] result1 = testExpression(srcStr, patternStr, SortOrder.ASC);
+        String[] result2 = testExpression(srcStr, patternStr, SortOrder.DESC);
+        testEqual(result1, result2);
+        return result1;
+    }
+
+    private void testEqual(String[] expectedStr, String[] result) {
+        assertTrue(!(result == null ^ expectedStr == null)); // a null/non-null mismatch must fail, not silently pass
+        if (expectedStr == null) return;
+        assertEquals(expectedStr.length, result.length);
+        for (int i = 0; i < expectedStr.length; ++i)
+            assertEquals(expectedStr[i], result[i]);
+    }
+
+    private void testExpression(String srcStr, String patternStr, String[] expectedStr)
+            throws SQLException {
+        String[] result = testExpression(srcStr, patternStr);
+        testEqual(expectedStr, result);
+    }
+
+    @Test
+    public void test() throws Exception {
+        String[] res = new String[] { "ONE", "TWO", "THREE" };
+        testExpression("ONE:TWO:THREE", ":", res);
+        testExpression("ONE,TWO,THREE", ",", res);
+        testExpression("12ONE34TWO56THREE78", "[0-9]+", new String[] { null, "ONE", "TWO", "THREE",
+                null });
+        testExpression("ONE34TWO56THREE78", "[0-9]+", new String[] { "ONE", "TWO", "THREE", null });
+        testExpression("123ONE34TWO56THREE", "[0-9]+", new String[] { null, "ONE", "TWO", "THREE" });
+        testExpression("123", "[0-9]+", new String[] { null, null });
+        testExpression("ONE", "[0-9]+", new String[] { "ONE" });
+    }
+}
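
The null entries in the expected arrays mark empty tokens where the pattern matched at a
string boundary. Guava's Splitter keeps those empty tokens (unlike String.split, which drops
trailing ones); the mapping of empty tokens to SQL NULL is an inference from the assertions
above, not from the function source. A standalone sketch:

    import com.google.common.base.Splitter;

    public class RegexpSplitCheck {
        public static void main(String[] args) {
            System.out.println(Splitter.onPattern("[0-9]+").splitToList("12ONE34TWO56THREE78"));
            // [, ONE, TWO, THREE, ]  -- the test's {null, "ONE", "TWO", "THREE", null}
            System.out.println(Splitter.onPattern("[0-9]+").splitToList("123"));
            // [, ]                   -- the test's {null, null}
        }
    }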

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java
new file mode 100644
index 0000000..c2889b3
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSubstrFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSubstrFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class RegexpSubstrFunctionTest {
+    private final static PVarchar TYPE = PVarchar.INSTANCE;
+
+    private String evalExp(Expression exp) {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        boolean eval = exp.evaluate(null, ptr);
+        assertTrue(eval);
+        String res = (String) exp.getDataType().toObject(ptr);
+        return res;
+    }
+
+    private String testExpression(String srcStr, String patternStr, int offset, SortOrder sortOrder) throws SQLException {
+        Expression srcExp, patternExp, offsetExp;
+        srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder);
+        patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder);
+        offsetExp = LiteralExpression.newConstant(offset, PInteger.INSTANCE, sortOrder);
+        List<Expression> expressions = Lists.newArrayList(srcExp, patternExp, offsetExp);
+        String res1, res2;
+        res1 = evalExp(new ByteBasedRegexpSubstrFunction(expressions));
+        res2 = evalExp(new StringBasedRegexpSubstrFunction(expressions));
+        assertEquals(res1, res2);
+        return res1;
+    }
+
+    private String testExpression(String srcStr, String patternStr, int offset) throws SQLException {
+        String result1 = testExpression(srcStr, patternStr, offset, SortOrder.ASC);
+        String result2 = testExpression(srcStr, patternStr, offset, SortOrder.DESC);
+        assertEquals(result1, result2);
+        return result1;
+    }
+
+    private void testExpression(String srcStr, String patternStr, int offset, String expectedStr)
+            throws SQLException {
+        String result = testExpression(srcStr, patternStr, offset);
+        assertEquals(expectedStr, result);
+    }
+
+    @Test
+    public void test() throws Exception {
+        testExpression("Report1?1", "[^\\\\?]+", 1, "Report1");
+        testExpression("Report1?2", "[^\\\\?]+", 1, "Report1");
+        testExpression("Report2?1", "[^\\\\?]+", 1, "Report2");
+        testExpression("Report3?2", "[^\\\\?]+", 1, "Report3");
+        testExpression("Report3?2", "[4-9]+", 0, (String) null);
+        testExpression("Report3?2", "[^\\\\?]+", 2, "eport3");
+        testExpression("Report3?2", "[^\\\\?]+", -5, "rt3");
+    }
+}
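
The offset argument is 1-based, and a negative offset counts back from the end of the
string; this is inferred from the expected values above, not from the function source. The
same results fall out of java.util.regex once the offset is resolved to a 0-based index:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class RegexpSubstrCheck {
        public static void main(String[] args) {
            String src = "Report3?2";
            int offset = -5;                                             // as in the last test case
            int from = offset > 0 ? offset - 1 : src.length() + offset;  // resolves to index 4
            Matcher m = Pattern.compile("[^\\\\?]+").matcher(src);
            System.out.println(m.find(from) ? m.group() : null);         // rt3
        }
    }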

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
index 8fb1a6c..b9ee0eb 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java
@@ -30,16 +30,18 @@ import java.util.TimeZone;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ByteBasedRegexpReplaceFunction;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSubstrFunction;
 import org.apache.phoenix.expression.function.FunctionArgumentType;
 import org.apache.phoenix.expression.function.LTrimFunction;
 import org.apache.phoenix.expression.function.LengthFunction;
 import org.apache.phoenix.expression.function.LowerFunction;
 import org.apache.phoenix.expression.function.LpadFunction;
 import org.apache.phoenix.expression.function.RTrimFunction;
-import org.apache.phoenix.expression.function.RegexpReplaceFunction;
-import org.apache.phoenix.expression.function.RegexpSubstrFunction;
 import org.apache.phoenix.expression.function.RoundDateExpression;
 import org.apache.phoenix.expression.function.SqlTypeNameFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpReplaceFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSubstrFunction;
 import org.apache.phoenix.expression.function.SubstrFunction;
 import org.apache.phoenix.expression.function.ToCharFunction;
 import org.apache.phoenix.expression.function.ToDateFunction;
@@ -80,13 +82,15 @@ public class SortOrderExpressionTest {
     @Test
     public void regexpSubstr() throws Exception {
         List<Expression> args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), getLiteral("l.h"), getLiteral(2));
-        evaluateAndAssertResult(new RegexpSubstrFunction(args), "lah");
+        evaluateAndAssertResult(new StringBasedRegexpSubstrFunction(args), "lah");
+        evaluateAndAssertResult(new ByteBasedRegexpSubstrFunction(args), "lah");
     }
     
     @Test
     public void regexpReplace() throws Exception {
         List<Expression> args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), getLiteral("l.h"), getLiteral("foo"));
-        evaluateAndAssertResult(new RegexpReplaceFunction(args), "bfoo");
+        evaluateAndAssertResult(new ByteBasedRegexpReplaceFunction(args), "bfoo");
+        evaluateAndAssertResult(new StringBasedRegexpReplaceFunction(args), "bfoo");
     }
     
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java
new file mode 100644
index 0000000..908c662
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.util.regex;
+
+import static org.junit.Assert.assertTrue;
+
+import java.sql.SQLException;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.schema.types.PVarcharArray;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.junit.Test;
+
+public class PatternPerformanceTest {
+
+    private static class Timer {
+        private long startTimeStamp;
+
+        public void reset() {
+            startTimeStamp = System.currentTimeMillis();
+        }
+
+        public double currentTime() {
+            return (System.currentTimeMillis() - startTimeStamp) / 1000.0;
+        }
+
+        public void printTime(String hint) {
+            System.out.println(hint + " Time=" + currentTime());
+        }
+    }
+
+    private String[] data = new String[] { "ONE:TWO:THREE", "ABC:DEF", "PKU:THU:FDU" };
+    private ImmutableBytesWritable[] dataPtr = new ImmutableBytesWritable[] { getPtr(data[0]),
+            getPtr(data[1]), getPtr(data[2]) };
+    private String patternString;
+    private ImmutableBytesWritable resultPtr = new ImmutableBytesWritable();
+    private int maxTimes = 10000000;
+    private Timer timer = new Timer();
+    private final boolean ENABLE_ASSERT = false;
+
+    private static ImmutableBytesWritable getPtr(String str) {
+        return new ImmutableBytesWritable(PVarchar.INSTANCE.toBytes(str));
+    }
+
+    private void testReplaceAll(ImmutableBytesWritable replacePtr, AbstractBasePattern pattern,
+            String name) {
+        timer.reset();
+        for (int i = 0; i < maxTimes; ++i) {
+            pattern.replaceAll(dataPtr[i % 3], replacePtr, resultPtr);
+            if (ENABLE_ASSERT) {
+                String result = (String) PVarchar.INSTANCE.toObject(resultPtr);
+                assertTrue((i % 3 == 1 && ":".equals(result))
+                        || (i % 3 != 1 && "::".equals(result)));
+            }
+        }
+        timer.printTime(name);
+    }
+
+    public void testReplaceAll() {
+        patternString = "[A-Z]+";
+        ImmutableBytesWritable replacePtr = getPtr("");
+        testReplaceAll(replacePtr, new JavaPattern(patternString), "Java replaceAll");
+        testReplaceAll(replacePtr, new JONIPattern(patternString), "JONI replaceAll");
+    }
+
+    private void testLike(AbstractBasePattern pattern, String name) {
+        timer.reset();
+        for (int i = 0; i < maxTimes; ++i) {
+            pattern.matches(dataPtr[i % 3], resultPtr);
+            if (ENABLE_ASSERT) {
+                Boolean b = (Boolean) PBoolean.INSTANCE.toObject(resultPtr);
+                assertTrue(i % 3 != 2 || b.booleanValue());
+            }
+        }
+        timer.printTime(name);
+    }
+
+    public void testLike() {
+        patternString = "\\Q\\E.*\\QU\\E.*\\QU\\E.*\\QU\\E.*\\Q\\E";
+        testLike(new JavaPattern(patternString), "Java Like");
+        testLike(new JONIPattern(patternString), "JONI Like");
+    }
+
+    private void testSubstr(AbstractBasePattern pattern, String name) {
+        timer.reset();
+        for (int i = 0; i < maxTimes; ++i) {
+            boolean ret = pattern.substr(dataPtr[i % 3], 0, resultPtr);
+            if (ENABLE_ASSERT) {
+                assertTrue(ret
+                        && (i % 3 != 2 || ":THU".equals(PVarchar.INSTANCE.toObject(resultPtr))));
+            }
+        }
+        timer.printTime(name);
+    }
+
+    public void testSubstr() {
+        patternString = "\\:[A-Z]+";
+        testSubstr(new JavaPattern(patternString), "Java Substr");
+        testSubstr(new JONIPattern(patternString), "JONI Substr");
+    }
+
+    private void testSplit(AbstractBaseSplitter pattern, String name) throws SQLException {
+        timer.reset();
+        for (int i = 0; i < maxTimes; ++i) {
+            boolean ret = pattern.split(dataPtr[i % 3], resultPtr);
+            if (ENABLE_ASSERT) {
+                PhoenixArray array = (PhoenixArray) PVarcharArray.INSTANCE.toObject(resultPtr);
+                assertTrue(ret && (i % 3 != 1 || ((String[]) array.getArray()).length == 2));
+            }
+        }
+        timer.printTime(name);
+    }
+
+    public void testSplit() throws SQLException {
+        patternString = "\\:";
+        testSplit(new GuavaSplitter(patternString), "GuavaSplit");
+        testSplit(new JONIPattern(patternString), "JONI Split");
+    }
+
+    @Test
+    public void test() throws Exception {
+        // testLike();
+        // testReplaceAll();
+        // testSubstr();
+        // testSplit();
+    }
+}
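
The benchmark bodies are commented out in test() so the unit-test suite stays fast. To
compare the engines by hand, one would uncomment the calls or drive the public methods
directly (illustrative only):

    public static void main(String[] args) throws Exception {
        PatternPerformanceTest bench = new PatternPerformanceTest();
        bench.testReplaceAll(); // prints "Java replaceAll Time=..." then "JONI replaceAll Time=..."
        bench.testLike();
        bench.testSubstr();
        bench.testSplit();      // compares GuavaSplitter against the JONI-based splitter
    }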

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java
index 9c218fb..6d00562 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java
@@ -17,7 +17,9 @@
 package org.apache.phoenix.util;
 
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 
+import org.apache.phoenix.schema.SortOrder;
 import org.junit.Test;
 
 public class StringUtilTest {
@@ -48,5 +50,33 @@ public class StringUtilTest {
     public void testLpadZeroPadding() throws Exception {
         testLpad("ABCD", 4, "1234", "ABCD");
     }
-    
+
+    @Test
+    public void testCalculateUTF8Offset() throws Exception {
+        String tmp, padding = "padding", data = "零一二三四五六七八九", trailing = "trailing";
+        byte[] bytes = (padding + data + trailing).getBytes();
+        int ret, offset = padding.getBytes().length, length = data.getBytes().length;
+
+        tmp = padding;
+        for (int i = 0; i < data.length(); ++i) {
+            ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i);
+            assertEquals(tmp.getBytes().length, ret);
+            tmp = tmp + data.charAt(i);
+        }
+        for (int i = data.length(); i < data.length() + 10; ++i) {
+            ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i);
+            assertEquals(-1, ret);
+        }
+
+        for (int i = -data.length() - 10; i < -data.length(); ++i) {
+            ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i);
+            assertEquals(-1, ret);
+        }
+        tmp = padding;
+        for (int i = -data.length(); i <= -1; ++i) {
+            ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i);
+            assertEquals("i=" + i, tmp.getBytes().length, ret);
+            tmp = tmp + data.charAt(i + data.length());
+        }
+    }
 }
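
Reading the assertions concretely (an inference from the test, not from the StringUtil
source): calculateUTF8Offset maps a character index within the UTF-8 data region to an
absolute byte offset, negative indexes count from the end, and out-of-range indexes yield
-1. With bytes, offset == 7 and length == 30 as built above ("padding" is 7 bytes; each CJK
digit is 3 bytes in UTF-8):

    int start = StringUtil.calculateUTF8Offset(bytes, 7, 30, SortOrder.ASC, 2);
    // start == 7 + 2 * 3 == 13, the first byte of '二'
    int fromEnd = StringUtil.calculateUTF8Offset(bytes, 7, 30, SortOrder.ASC, -1);
    // fromEnd == 7 + 9 * 3 == 34, the first byte of '九'
    int outOfRange = StringUtil.calculateUTF8Offset(bytes, 7, 30, SortOrder.ASC, 10);
    // outOfRange == -1: index 10 is past the ten-character region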

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 872c318..66695f8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -56,15 +56,16 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheRequest
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.expression.AndExpression;
+import org.apache.phoenix.expression.ByteBasedLikeExpression;
 import org.apache.phoenix.expression.ComparisonExpression;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.InListExpression;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
-import org.apache.phoenix.expression.LikeExpression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.NotExpression;
 import org.apache.phoenix.expression.OrExpression;
 import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.expression.StringBasedLikeExpression;
 import org.apache.phoenix.expression.function.SubstrFunction;
 import org.apache.phoenix.filter.MultiCQKeyValueComparisonFilter;
 import org.apache.phoenix.filter.MultiKeyValueComparisonFilter;
@@ -77,6 +78,8 @@ import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.parse.LikeParseNode.LikeType;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.TableRef;
@@ -264,13 +267,26 @@ public class TestUtil {
         return  new ComparisonExpression(Arrays.asList(e, LiteralExpression.newConstant(o)), op);
     }
 
-    public static Expression like(Expression e, Object o) {
-        return LikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_SENSITIVE);
+    private static boolean useByteBasedRegex(StatementContext context) {
+        return context
+                .getConnection()
+                .getQueryServices()
+                .getProps()
+                .getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB,
+                    QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX);
     }
 
-    public static Expression ilike(Expression e, Object o) {
-      return LikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_INSENSITIVE);
-  }
+    public static Expression like(Expression e, Object o, StatementContext context) {
+        return useByteBasedRegex(context)
+                ? ByteBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_SENSITIVE)
+                : StringBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_SENSITIVE);
+    }
+
+    public static Expression ilike(Expression e, Object o, StatementContext context) {
+        return useByteBasedRegex(context)
+                ? ByteBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_INSENSITIVE)
+                : StringBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_INSENSITIVE);
+    }
 
     public static Expression substr(Expression e, Object offset, Object length) {
         return  new SubstrFunction(Arrays.asList(e, LiteralExpression.newConstant(offset), LiteralExpression.newConstant(length)));
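
With this change, which LIKE implementation the helpers build is driven by the connection's
configuration. A minimal sketch of forcing one path from a test (the Properties wiring and
the getUrl() helper are assumptions; only the QueryServices constant comes from this patch):

    Properties props = new Properties();
    props.setProperty(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, "true");
    Connection conn = DriverManager.getConnection(getUrl(), props);
    // like(...)/ilike(...) built against this connection's StatementContext now
    // resolve to ByteBasedLikeExpression; "false" selects StringBasedLikeExpression.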

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 4793cf2..c2ff589 100644
--- a/pom.xml
+++ b/pom.xml
@@ -105,6 +105,7 @@
     <htrace.version>3.1.0-incubating</htrace.version>
     <collections.version>3.2.1</collections.version>
     <jodatime.version>2.7</jodatime.version>
+    <joni.version>2.1.2</joni.version>
 
     <!-- Test Dependencies -->
     <mockito-all.version>1.8.5</mockito-all.version>


[07/50] [abbrv] phoenix git commit: PHOENIX-1722 Speedup CONVERT_TZ function (Vaclav Loffelmann)

Posted by ma...@apache.org.
PHOENIX-1722 Speedup CONVERT_TZ function (Vaclav Loffelmann)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b9002b7c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b9002b7c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b9002b7c

Branch: refs/heads/calcite
Commit: b9002b7caa54c4fde04b27fe6963719a8d821d7c
Parents: a7d7dfb
Author: Samarth <sa...@salesforce.com>
Authored: Fri Mar 27 14:58:40 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Fri Mar 27 14:58:40 2015 -0700

----------------------------------------------------------------------
 .../end2end/ConvertTimezoneFunctionIT.java      | 24 +++++-
 .../apache/phoenix/cache/JodaTimezoneCache.java | 84 ++++++++++++++++++++
 .../function/ConvertTimezoneFunction.java       | 38 +++------
 .../function/TimezoneOffsetFunction.java        | 25 ++----
 .../phoenix/cache/JodaTimezoneCacheTest.java    | 51 ++++++++++++
 pom.xml                                         |  2 +-
 6 files changed, 173 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9002b7c/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
index d89a03b..f415dc6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
@@ -23,8 +23,10 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
+import static org.junit.Assert.assertFalse;
 import org.junit.Test;
 
 /**
@@ -129,7 +131,7 @@ public class ConvertTimezoneFunctionIT extends BaseHBaseManagedTimeIT {
         try {
             ResultSet rs = conn.createStatement().executeQuery(
                     "SELECT k1, dates, CONVERT_TZ(dates, 'UNKNOWN_TIMEZONE', 'America/Adak') FROM TIMEZONE_OFFSET_TEST");
-    
+
             rs.next();
             rs.getDate(3).getTime();
             fail();
@@ -137,4 +139,24 @@ public class ConvertTimezoneFunctionIT extends BaseHBaseManagedTimeIT {
             assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode());
         }
     }
+
+    @Test
+    public void testConvertMultipleRecords() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE IF NOT EXISTS TIMEZONE_OFFSET_TEST (k1 INTEGER NOT NULL, dates DATE CONSTRAINT pk PRIMARY KEY (k1))";
+        Statement stmt = conn.createStatement();
+        stmt.execute(ddl);
+        stmt.execute("UPSERT INTO TIMEZONE_OFFSET_TEST (k1, dates) VALUES (1, TO_DATE('2014-03-01 00:00:00'))");
+        stmt.execute("UPSERT INTO TIMEZONE_OFFSET_TEST (k1, dates) VALUES (2, TO_DATE('2014-03-01 00:00:00'))");
+        conn.commit();
+
+        ResultSet rs = stmt.executeQuery(
+                "SELECT k1, dates, CONVERT_TZ(dates, 'UTC', 'America/Adak') FROM TIMEZONE_OFFSET_TEST");
+
+        assertTrue(rs.next());
+        assertEquals(1393596000000L, rs.getDate(3).getTime()); // Fri, 28 Feb 2014 14:00:00
+        assertTrue(rs.next());
+        assertEquals(1393596000000L, rs.getDate(3).getTime()); // Fri, 28 Feb 2014 14:00:00
+        assertFalse(rs.next());
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9002b7c/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
new file mode 100644
index 0000000..54904d7
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.IllegalDataException;
+import org.joda.time.DateTimeZone;
+
+public class JodaTimezoneCache {
+
+    public static final int CACHE_EXPIRE_TIME_MINUTES = 10;
+    private static final LoadingCache<ByteBuffer, DateTimeZone> cachedJodaTimeZones = createTimezoneCache();
+
+    /**
+     * Returns Joda's DateTimeZone instance from the cache, creating and caching a new one if needed.
+     *
+     * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}, e.g. Europe/Isle_of_Man
+     * @return Joda's DateTimeZone instance
+     * @throws IllegalDataException if an unknown timezone id is passed
+     */
+    public static DateTimeZone getInstance(ByteBuffer timezoneId) {
+        try {
+            return cachedJodaTimeZones.get(timezoneId);
+        } catch (ExecutionException ex) {
+            throw new IllegalDataException(ex);
+        } catch (UncheckedExecutionException e) {
+            throw new IllegalDataException("Unknown timezone " + Bytes.toString(timezoneId.array()));
+        }
+    }
+
+    /**
+     * Returns Joda's DateTimeZone instance from the cache, creating and caching a new one if needed.
+     *
+     * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}, e.g. Europe/Isle_of_Man
+     * @return Joda's DateTimeZone instance
+     * @throws IllegalDataException if an unknown timezone id is passed
+     */
+    public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) {
+        return getInstance(ByteBuffer.wrap(timezoneId.copyBytes()));
+    }
+
+    /**
+     * Returns Joda's DateTimeZone instance from the cache, creating and caching a new one if needed.
+     *
+     * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}, e.g. Europe/Isle_of_Man
+     * @return Joda's DateTimeZone instance
+     * @throws IllegalDataException if an unknown timezone id is passed
+     */
+    public static DateTimeZone getInstance(String timezoneId) {
+        return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId)));
+    }
+
+    private static LoadingCache<ByteBuffer, DateTimeZone> createTimezoneCache() {
+        return CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPIRE_TIME_MINUTES, TimeUnit.MINUTES).build(new CacheLoader<ByteBuffer, DateTimeZone>() {
+
+            @Override
+            public DateTimeZone load(ByteBuffer timezone) throws Exception {
+                return DateTimeZone.forID(Bytes.toString(timezone.array()));
+            }
+        });
+    }
+
+}
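
A short usage sketch (the instant matches the integration test above; America/Adak is
UTC-10 at that date):

    DateTimeZone tz = JodaTimezoneCache.getInstance("America/Adak");
    long utcMillis = 1393632000000L;                 // 2014-03-01 00:00:00 UTC
    long shifted = utcMillis + tz.getOffset(utcMillis);
    System.out.println(shifted);                     // 1393596000000, i.e. 2014-02-28 14:00:00
    // a repeated lookup of the same id is served from the LoadingCache; entries
    // are evicted 10 minutes after last access (expireAfterAccess)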

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9002b7c/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
index dcde31f..3ea47a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
@@ -15,21 +15,17 @@
  */
 package org.apache.phoenix.expression.function;
 
-import java.sql.Date;
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.cache.JodaTimezoneCache;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.joda.time.DateTimeZone;
 
 /**
  * Built-in function CONVERT_TZ(date, 'timezone_from', 'timezone_to'). Converts a date from one timezone to
@@ -43,7 +39,6 @@ import org.apache.phoenix.schema.tuple.Tuple;
 public class ConvertTimezoneFunction extends ScalarFunction {
 
     public static final String NAME = "CONVERT_TZ";
-    private final Map<String, TimeZone> cachedTimeZones = new HashMap<String, TimeZone>();
 
     public ConvertTimezoneFunction() {
     }
@@ -62,40 +57,25 @@ public class ConvertTimezoneFunction extends ScalarFunction {
         if (!children.get(0).evaluate(tuple, ptr)) {
             return false;
         }
-
-        Date dateo = (Date) PDate.INSTANCE.toObject(ptr, children.get(0).getSortOrder());
-        Long date = dateo.getTime();
+        long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(0).getSortOrder());
 
         if (!children.get(1).evaluate(tuple, ptr)) {
             return false;
         }
-        TimeZone timezoneFrom = getTimezoneFromCache(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
+        DateTimeZone timezoneFrom = JodaTimezoneCache.getInstance(ptr);
 
         if (!children.get(2).evaluate(tuple, ptr)) {
             return false;
         }
-        TimeZone timezoneTo = TimeZone.getTimeZone(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
-
-        long dateInUtc = date - timezoneFrom.getOffset(date);
-        long dateInTo = dateInUtc + timezoneTo.getOffset(dateInUtc);
-
-        ptr.set(PDate.INSTANCE.toBytes(new Date(dateInTo)));
+        DateTimeZone timezoneTo = JodaTimezoneCache.getInstance(ptr);
 
+        long convertedDate = date - timezoneFrom.getOffset(date) + timezoneTo.getOffset(date);
+        byte[] outBytes = new byte[8];
+        PDate.INSTANCE.getCodec().encodeLong(convertedDate, outBytes, 0);
+        ptr.set(outBytes);
         return true;
     }
 
-    private TimeZone getTimezoneFromCache(String timezone) throws IllegalDataException {
-        if (!cachedTimeZones.containsKey(timezone)) {
-            TimeZone tz = TimeZone.getTimeZone(timezone);
-            if (!tz.getID().equals(timezone)) {
-                throw new IllegalDataException("Invalid timezone " + timezone);
-            }
-            cachedTimeZones.put(timezone, tz);
-            return tz;
-        }
-        return cachedTimeZones.get(timezone);
-    }
-
     @Override
     public PDataType getDataType() {
         return PDate.INSTANCE;
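
The rewrite collapses the old two-step UTC round-trip into one expression over epoch
millis. With the integration test's values the arithmetic works out as (illustration):

    long date = 1393632000000L;                          // TO_DATE('2014-03-01 00:00:00')
    DateTimeZone from = JodaTimezoneCache.getInstance("UTC");
    DateTimeZone to = JodaTimezoneCache.getInstance("America/Adak");
    long converted = date - from.getOffset(date) + to.getOffset(date);
    // 1393632000000 - 0 + (-36000000) == 1393596000000, the value the IT asserts

One subtlety of the one-pass form: the target offset is now sampled at the original instant
rather than at the UTC-normalized one, which can only make a difference inside a DST
transition window of the target zone.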

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9002b7c/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
index 2cfbc25..8c70346 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java
@@ -18,22 +18,18 @@
 
 package org.apache.phoenix.expression.function;
 
-import java.sql.Date;
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.cache.JodaTimezoneCache;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.types.PDate;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.joda.time.DateTimeZone;
 
 /**
  * Returns the offset (shift) of a timezone at a particular datetime, in minutes.
@@ -45,7 +41,6 @@ public class TimezoneOffsetFunction extends ScalarFunction {
 
     public static final String NAME = "TIMEZONE_OFFSET";
     private static final int MILLIS_TO_MINUTES = 60 * 1000;
-    private final Map<String, TimeZone> cachedTimeZones = new HashMap<String, TimeZone>();
 
     public TimezoneOffsetFunction() {
     }
@@ -64,24 +59,14 @@ public class TimezoneOffsetFunction extends ScalarFunction {
         if (!children.get(0).evaluate(tuple, ptr)) {
             return false;
         }
-
-        String timezone = Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength());
+        DateTimeZone timezoneInstance = JodaTimezoneCache.getInstance(ptr);
 
         if (!children.get(1).evaluate(tuple, ptr)) {
             return false;
         }
+        long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(1).getSortOrder());
 
-        if (!cachedTimeZones.containsKey(timezone)) {
-            TimeZone tz = TimeZone.getTimeZone(timezone);
-            if (!tz.getID().equals(timezone)) {
-                throw new IllegalDataException("Invalid timezone " + timezone);
-            }
-            cachedTimeZones.put(timezone, tz);
-        }
-
-		Date date = (Date) PDate.INSTANCE.toObject(ptr, children.get(1).getSortOrder());
-		int offset = cachedTimeZones.get(timezone).getOffset(date.getTime());
-
+        int offset = timezoneInstance.getOffset(date);
         ptr.set(PInteger.INSTANCE.toBytes(offset / MILLIS_TO_MINUTES));
         return true;
     }
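
Checking the result by hand for the same instant (a sketch; MILLIS_TO_MINUTES is 60 * 1000
as defined above):

    DateTimeZone tz = JodaTimezoneCache.getInstance("America/Adak");
    long date = 1393632000000L;                  // 2014-03-01 00:00:00 UTC
    int minutes = tz.getOffset(date) / (60 * 1000);
    System.out.println(minutes);                 // -600, i.e. UTC-10:00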

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9002b7c/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
new file mode 100644
index 0000000..f388703
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import java.nio.ByteBuffer;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.IllegalDataException;
+import org.joda.time.DateTimeZone;
+import static org.junit.Assert.assertTrue;
+import org.junit.Test;
+
+public class JodaTimezoneCacheTest {
+
+    @Test
+    public void testGetInstanceByteBufferUTC() {
+        DateTimeZone instance = JodaTimezoneCache.getInstance(ByteBuffer.wrap(Bytes.toBytes("UTC")));
+        assertTrue(instance instanceof DateTimeZone);
+    }
+
+    @Test
+    public void testGetInstanceString() {
+        DateTimeZone instance = JodaTimezoneCache.getInstance("America/St_Vincent");
+        assertTrue(instance instanceof DateTimeZone);
+    }
+
+    @Test(expected = IllegalDataException.class)
+    public void testGetInstanceStringUnknown() {
+        JodaTimezoneCache.getInstance("SOME_UNKNOWN_TIMEZONE");
+    }
+
+    @Test
+    public void testGetInstanceImmutableBytesWritable() {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable(Bytes.toBytes("Europe/Isle_of_Man"));
+        DateTimeZone instance = JodaTimezoneCache.getInstance(ptr);
+        assertTrue(instance instanceof DateTimeZone);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b9002b7c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ecfd3ec..861c868 100644
--- a/pom.xml
+++ b/pom.xml
@@ -103,7 +103,7 @@
     <commons-codec.version>1.7</commons-codec.version>
     <htrace.version>3.1.0-incubating</htrace.version>
     <collections.version>3.2.1</collections.version>
-    <jodatime.version>2.3</jodatime.version>
+    <jodatime.version>2.7</jodatime.version>
 
     <!-- Test Dependencies -->
     <mockito-all.version>1.8.5</mockito-all.version>


[39/50] [abbrv] phoenix git commit: PHOENIX-1861 Padding character should be inverted if sort order is descending (Dumindu Buddhika)

Posted by ma...@apache.org.
PHOENIX-1861 Padding character should be inverted if sort order is
descending (Dumindu Buddhika)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1b45110d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1b45110d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1b45110d

Branch: refs/heads/calcite
Commit: 1b45110db66cbe40871aeea6025aef5ff7ef2682
Parents: 0d78e48
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Apr 14 23:23:49 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Apr 14 23:23:49 2015 +0530

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/compile/WhereOptimizer.java | 2 +-
 .../src/main/java/org/apache/phoenix/schema/PTableImpl.java      | 2 +-
 .../src/main/java/org/apache/phoenix/schema/types/PBinary.java   | 2 +-
 .../src/main/java/org/apache/phoenix/schema/types/PChar.java     | 4 ++--
 .../src/main/java/org/apache/phoenix/schema/types/PDataType.java | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b45110d/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index b03793d..e25cfbc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -1210,7 +1210,7 @@ public class WhereOptimizer {
                     Integer length = getColumn().getMaxLength();
                     if (length != null) {
                         // Go through type to pad as the fill character depends on the type.
-                        type.pad(ptr, length);
+                        type.pad(ptr, length, SortOrder.getDefault());
                     }
                 }
                 byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b45110d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 8ce4183..702edbd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -703,7 +703,7 @@ public class PTableImpl implements PTable {
                 Integer	maxLength = column.getMaxLength();
             	if (!isNull && type.isFixedWidth() && maxLength != null) {
     				if (ptr.getLength() <= maxLength) {
-                        type.pad(ptr, maxLength);
+                        type.pad(ptr, maxLength, SortOrder.getDefault());
                     } else if (ptr.getLength() > maxLength) {
                         throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " bytes (" + type.toObject(byteValue) + ")");
                     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b45110d/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
index 25604a3..d6d07fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
@@ -35,7 +35,7 @@ public class PBinary extends PDataType<byte[]> {
   }
 
   @Override
-  public void pad(ImmutableBytesWritable ptr, Integer maxLength) {
+  public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) {
     if (ptr.getLength() >= maxLength) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b45110d/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
index 48d47d3..2effc38 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
@@ -41,13 +41,13 @@ public class PChar extends PDataType<String> {
   }
 
     @Override
-    public void pad(ImmutableBytesWritable ptr, Integer maxLength) {
+    public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) {
       if (ptr.getLength() >= maxLength) {
         return;
       }
       byte[] newBytes = new byte[maxLength];
       System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength());
-      Arrays.fill(newBytes, ptr.getLength(), maxLength, StringUtil.SPACE_UTF8);
+      Arrays.fill(newBytes, ptr.getLength(), maxLength, sortOrder == SortOrder.ASC ? StringUtil.SPACE_UTF8 : StringUtil.INVERTED_SPACE_UTF8);
       ptr.set(newBytes);
     }
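
Why the fill byte must track the sort order (a sketch; the XOR-0xFF encoding of DESC
columns and the exact value of INVERTED_SPACE_UTF8 are assumptions consistent with the
change):

    byte space = 0x20;                          // StringUtil.SPACE_UTF8
    byte invertedSpace = (byte) (space ^ 0xFF); // 0xDF, assumed INVERTED_SPACE_UTF8
    // A DESC-encoded CHAR stores every byte inverted, so padding it with a plain
    // 0x20 would compare incorrectly against fully inverted values; the pad must
    // be inverted too for padded and unpadded forms to sort consistently.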
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b45110d/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
index 48b215f..e8654ff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
@@ -1173,7 +1173,7 @@ public abstract class PDataType<T> implements DataType<T>, Comparable<PDataType<
     return object;
   }
 
-  public void pad(ImmutableBytesWritable ptr, Integer maxLength) {
+  public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) {
   }
 
   public static PDataType arrayBaseType(PDataType arrayType) {


[48/50] [abbrv] phoenix git commit: PHOENIX-1865 Fix missing ASL headers

Posted by ma...@apache.org.
PHOENIX-1865 Fix missing ASL headers


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e1bbb944
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e1bbb944
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e1bbb944

Branch: refs/heads/calcite
Commit: e1bbb944126da52d14bf5df580167644a1cd8499
Parents: d147423
Author: Nick Dimiduk <nd...@apache.org>
Authored: Tue Apr 14 16:28:26 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed Apr 15 16:48:57 2015 -0700

----------------------------------------------------------------------
 .../expression/function/RandomFunction.java     | 17 +++++++++++++++
 .../test/resources/datamodel/test_schema.sql    | 16 ++++++++++++++
 phoenix-protocol/src/main/PGuidePosts.proto     | 20 ++++++++++++++++++
 phoenix-spark/pom.xml                           | 22 ++++++++++++++++++++
 phoenix-spark/src/it/resources/log4j.xml        | 21 +++++++++++++++++++
 phoenix-spark/src/it/resources/setup.sql        | 16 ++++++++++++++
 pom.xml                                         |  2 ++
 7 files changed, 114 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
index 535a127..01a4eed 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.expression.function;
 
 import java.io.DataInput;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/datamodel/test_schema.sql b/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
index 498f832..8f16675 100644
--- a/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
+++ b/phoenix-pherf/src/test/resources/datamodel/test_schema.sql
@@ -1,3 +1,19 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
 CREATE TABLE IF NOT EXISTS PHERF.TEST_TABLE (
     TENANT_ID CHAR(15) NOT NULL,
     PARENT_ID CHAR(15) NOT NULL,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/phoenix-protocol/src/main/PGuidePosts.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PGuidePosts.proto b/phoenix-protocol/src/main/PGuidePosts.proto
index 1550391..047a658 100644
--- a/phoenix-protocol/src/main/PGuidePosts.proto
+++ b/phoenix-protocol/src/main/PGuidePosts.proto
@@ -1,3 +1,23 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 option java_package = "org.apache.phoenix.coprocessor.generated";
 option java_outer_classname = "PGuidePostsProtos";
 option java_generic_services = true;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index abed37e..8b06cf7 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -1,3 +1,25 @@
+<?xml version='1.0'?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
 <project xmlns="http://maven.apache.org/POM/4.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/phoenix-spark/src/it/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/log4j.xml b/phoenix-spark/src/it/resources/log4j.xml
index 58abece..10c2dc0 100644
--- a/phoenix-spark/src/it/resources/log4j.xml
+++ b/phoenix-spark/src/it/resources/log4j.xml
@@ -1,4 +1,25 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
 <!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
 
 <log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/phoenix-spark/src/it/resources/setup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
index 14a7e7e..ce74c58 100644
--- a/phoenix-spark/src/it/resources/setup.sql
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -1,3 +1,19 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
 CREATE TABLE table1 (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
 CREATE TABLE table2 (id BIGINT NOT NULL PRIMARY KEY, table1_id BIGINT, "t2col1" VARCHAR)
 UPSERT INTO table1 (id, col1) VALUES (1, 'test_row_1')

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1bbb944/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index c2ff589..b81dfb5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -190,6 +190,8 @@
               <exclude>dev/release_files/**</exclude>
               <exclude>**/target/**</exclude>
               <exclude>**/*.versionsBackup</exclude>
+              <!-- properties files need no license -->
+              <exclude>**/*.properties</exclude>
               <!-- exclude docs -->
               <exclude>docs/**</exclude>
               <!-- exclude examples -->


[22/50] [abbrv] phoenix git commit: PHOENIX-1071 - Add phoenix-spark for Spark integration - memory setting

Posted by ma...@apache.org.
PHOENIX-1071 - Add phoenix-spark for Spark integration - memory setting


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9bbd5ead
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9bbd5ead
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9bbd5ead

Branch: refs/heads/calcite
Commit: 9bbd5ead568ccdbecdea974d10aac93ccb30d9bd
Parents: f2d9080
Author: ravimagham <ra...@apache.org>
Authored: Sun Apr 5 08:29:23 2015 -0700
Committer: ravimagham <ra...@apache.org>
Committed: Sun Apr 5 08:29:23 2015 -0700

----------------------------------------------------------------------
 phoenix-spark/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9bbd5ead/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 3312b09..fd0ccaf 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -502,6 +502,7 @@
             <configuration>
               <parallel>true</parallel>
               <tagsToExclude>Integration-Test</tagsToExclude>
+              <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
             </configuration>
           </execution>
           <execution>
@@ -513,7 +514,6 @@
             <configuration>
               <parallel>false</parallel>
               <tagsToInclude>Integration-Test</tagsToInclude>
-              <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
             </configuration>
           </execution>
         </executions>


[24/50] [abbrv] phoenix git commit: PHOENIX-1580 Support UNION ALL

Posted by ma...@apache.org.
PHOENIX-1580 Support UNION ALL


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c50feca2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c50feca2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c50feca2

Branch: refs/heads/calcite
Commit: c50feca254f4c8ae2505d83f738a6ab9d92a9fd9
Parents: c823be9
Author: maryannxue <we...@intel.com>
Authored: Mon Apr 6 10:46:37 2015 -0400
Committer: maryannxue <we...@intel.com>
Committed: Mon Apr 6 10:46:37 2015 -0400

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/UnionAllIT.java  | 579 +++++++++++++++++++
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  48 +-
 .../apache/phoenix/compile/FromCompiler.java    |   4 +-
 .../apache/phoenix/compile/QueryCompiler.java   |  65 ++-
 .../phoenix/compile/StatementNormalizer.java    |   2 +-
 .../phoenix/compile/SubselectRewriter.java      |   5 +-
 .../apache/phoenix/compile/UnionCompiler.java   |  86 +++
 .../phoenix/exception/SQLExceptionCode.java     |   6 +
 .../apache/phoenix/execute/AggregatePlan.java   |   1 +
 .../org/apache/phoenix/execute/UnionPlan.java   | 190 ++++++
 .../iterate/MergeSortTopNResultIterator.java    |   9 +-
 .../phoenix/iterate/UnionResultIterators.java   | 109 ++++
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  25 +-
 .../apache/phoenix/parse/ParseNodeFactory.java  |  39 +-
 .../apache/phoenix/parse/ParseNodeRewriter.java |   5 +-
 .../apache/phoenix/parse/SelectStatement.java   |  30 +-
 .../apache/phoenix/parse/QueryParserTest.java   |  13 -
 17 files changed, 1147 insertions(+), 69 deletions(-)
----------------------------------------------------------------------
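Before the per-file diffs, a minimal JDBC sketch of the feature this commit
adds. It reuses the test tables (test_table, b_table) created by the new
UnionAllIT below; the connection URL "jdbc:phoenix:localhost" and the class
name are assumptions for illustration, not part of the commit.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class UnionAllExample {
        public static void main(String[] args) throws Exception {
            // UNION ALL across two Phoenix tables, with ORDER BY/LIMIT
            // applying to the combined result (see the grammar change below).
            try (Connection conn =
                     DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                ResultSet rs = stmt.executeQuery(
                    "SELECT a_string, col1 FROM test_table " +
                    "UNION ALL " +
                    "SELECT a_string, col1 FROM b_table " +
                    "ORDER BY col1 LIMIT 10");
                while (rs.next()) {
                    System.out.println(rs.getString(1) + " -> " + rs.getInt(2));
                }
            }
        }
    }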


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
new file mode 100644
index 0000000..b3b2f7d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
@@ -0,0 +1,579 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+public class UnionAllIT extends BaseOwnClusterHBaseManagedTimeIT {
+
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Map<String, String> props = Collections.emptyMap();
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    }
+
+    @Test
+    public void testUnionAllSelects() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "select * from test_table union all select * from b_table union all select * from test_table";
+            ResultSet rs = conn.createStatement().executeQuery(ddl);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testAggregate() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            stmt.setString(1, "d");
+            stmt.setInt(2, 40);
+            stmt.execute();
+            stmt.setString(1, "e");
+            stmt.setInt(2, 50);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            conn.commit();
+
+            String aggregate = "select count(*) from test_table union all select count(*) from b_table union all select count(*) from test_table";
+            ResultSet rs = conn.createStatement().executeQuery(aggregate);
+            assertTrue(rs.next());
+            assertEquals(3,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(2,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(3,rs.getInt(1));
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testGroupBy() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            conn.commit();
+
+            String aggregate = "select count(*), col1 from test_table group by col1 union all select count(*), col1 from b_table group by col1";
+            ResultSet rs = conn.createStatement().executeQuery(aggregate);
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1)); 
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testOrderByLimit() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table1 " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO test_table1 VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            stmt.setString(1, "f");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table1 " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            dml = "UPSERT INTO b_table1 VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            stmt.setString(1, "d");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            stmt.setString(1, "e");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            conn.commit();
+
+            String aggregate = "select count(*), col1 from b_table1 group by col1 union all select count(*), col1 from test_table1 group by col1 order by col1";
+            ResultSet rs = conn.createStatement().executeQuery(aggregate);
+            assertTrue(rs.next());
+            assertEquals(2,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(3,rs.getInt(1));  
+            assertFalse(rs.next());  
+
+            String limit = "select count(*), col1 x from test_table1 group by col1 union all select count(*), col1 x from b_table1 group by col1 order by x limit 2";
+            rs = conn.createStatement().executeQuery(limit);
+            assertTrue(rs.next());
+            assertEquals(2,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));
+            assertFalse(rs.next());  
+
+            String limitOnly = "select * from test_table1 union all select * from b_table1 limit 2";
+            rs = conn.createStatement().executeQuery(limitOnly);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("f",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testSelectDiff() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "select a_string, col1, col1 from test_table union all select * from b_table union all select a_string, col1 from test_table";
+            conn.createStatement().executeQuery(ddl);
+            fail();
+        }  catch (SQLException e) {
+            assertEquals(SQLExceptionCode.SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS.getErrorCode(), e.getErrorCode());
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testJoinInUnionAll() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "select x.a_string, y.col1  from test_table x, b_table y where x.a_string=y.a_string union all " +
+                    "select t.a_string, s.col1 from test_table s, b_table t where s.a_string=t.a_string"; 
+            ResultSet rs = conn.createStatement().executeQuery(ddl);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertFalse(rs.next()); 
+
+            ddl = "select x.a_string, y.col1  from test_table x join b_table y on x.a_string=y.a_string union all " +
+                    "select t.a_string, s.col1 from test_table s inner join b_table t on s.a_string=t.a_string"; 
+            rs = conn.createStatement().executeQuery(ddl);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertFalse(rs.next()); 
+
+            ddl = "select x.a_string, y.col1  from test_table x left join b_table y on x.a_string=y.a_string union all " +
+                    "select t.a_string, s.col1 from test_table s inner join b_table t on s.a_string=t.a_string union all " +
+                    "select y.a_string, x.col1 from b_table x right join test_table y on x.a_string=y.a_string";
+            rs = conn.createStatement().executeQuery(ddl);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2)); 
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(20,rs.getInt(2)); 
+            assertFalse(rs.next()); 
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testDerivedTable() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "select * from (select x.a_string, y.col1  from test_table x, b_table y where x.a_string=y.a_string) union all " +
+                    "select * from (select t.a_string, s.col1 from test_table s, b_table t where s.a_string=t.a_string)"; 
+            ResultSet rs = conn.createStatement().executeQuery(ddl);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertFalse(rs.next()); 
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testUnionAllInSubquery() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "select a_string, col1 from test_table where a_string in (select a_string from test_table union all select a_string from b_table)";
+            conn.createStatement().executeQuery(ddl);
+        }  catch (SQLFeatureNotSupportedException e) {
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testUnionAllInSubqueryDerived() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "select a_string, col1 from test_table where a_string in (select a_string from  " +
+                    "(select * from test_table union all select * from b_table))";
+            conn.createStatement().executeQuery(ddl);
+        }  catch (SQLException e) { 
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testUnionAllWithBindParam() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "select a_string, col1 from b_table where col1=? union all select a_string, col1 from test_table where col1=? ";
+            stmt = conn.prepareStatement(ddl);
+            stmt.setInt(1, 20);
+            stmt.setInt(2, 10);
+            ResultSet rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(10,rs.getInt(2));
+            assertFalse(rs.next()); 
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testExplainUnionAll() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE test_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "CREATE TABLE b_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            ddl = "explain select a_string, col1 from test_table union all select a_string, col1 from b_table order by col1 limit 1";
+            ResultSet rs = conn.createStatement().executeQuery(ddl);
+            assertEquals(
+                    "UNION ALL OVER 2 QUERIES\n" +
+                    "    CLIENT PARALLEL 1-WAY FULL SCAN OVER TEST_TABLE\n" + 
+                    "        SERVER TOP 1 ROW SORTED BY [COL1]\n" + 
+                    "    CLIENT MERGE SORT\n" + 
+                    "    CLIENT PARALLEL 1-WAY FULL SCAN OVER B_TABLE\n" + 
+                    "        SERVER TOP 1 ROW SORTED BY [COL1]\n" + 
+                    "    CLIENT MERGE SORT\n" + 
+                    "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs)); 
+            
+            String limitPlan = 
+                    "UNION ALL OVER 2 QUERIES\n" + 
+                    "    CLIENT SERIAL 1-WAY FULL SCAN OVER TEST_TABLE\n" + 
+                    "        SERVER 2 ROW LIMIT\n" + 
+                    "    CLIENT 2 ROW LIMIT\n" + 
+                    "    CLIENT SERIAL 1-WAY FULL SCAN OVER B_TABLE\n" + 
+                    "        SERVER 2 ROW LIMIT\n" + 
+                    "    CLIENT 2 ROW LIMIT\n" + 
+                    "CLIENT 2 ROW LIMIT";
+            ddl = "explain select a_string, col1 from test_table union all select a_string, col1 from b_table";
+            rs = conn.createStatement().executeQuery(ddl + " limit 2");
+            assertEquals(limitPlan, QueryUtil.getExplainPlan(rs));
+            Statement stmt = conn.createStatement();
+            stmt.setMaxRows(2);
+            rs = stmt.executeQuery(ddl);
+            assertEquals(limitPlan, QueryUtil.getExplainPlan(rs));
+            
+            ddl = "explain select a_string, col1 from test_table union all select a_string, col1 from b_table";
+            rs = conn.createStatement().executeQuery(ddl);
+            assertEquals(
+                    "UNION ALL OVER 2 QUERIES\n" + 
+                    "    CLIENT PARALLEL 1-WAY FULL SCAN OVER TEST_TABLE\n" + 
+                    "    CLIENT PARALLEL 1-WAY FULL SCAN OVER B_TABLE", QueryUtil.getExplainPlan(rs)); 
+        } finally {
+            conn.close();
+        }
+    } 
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 61d5afa..03ec9f5 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -113,6 +113,7 @@ tokens
     TRACE='trace';
     ASYNC='async';
     SAMPLING='sampling';
+    UNION='union';
 }
 
 
@@ -351,19 +352,14 @@ statement returns [BindableStatement ret]
 
 // Parses a select statement which must be the only statement (expects an EOF after the statement).
 query returns [SelectStatement ret]
-    :   SELECT s=hinted_select_node EOF {$ret=s;}
+    :   s=select_node EOF {$ret=s;}
     ;
 
 // Parses a single SQL statement (expects an EOF after the select statement).
 oneStatement returns [BindableStatement ret]
-    :   (SELECT s=hinted_select_node {$ret=s;} 
-    |    ns=non_select_node {$ret=ns;}
-        )
-    ;
-
-non_select_node returns [BindableStatement ret]
 @init{ contextStack.push(new ParseContext()); }
-    :  (s=upsert_node
+    :  (s=select_node
+    |	s=upsert_node
     |   s=delete_node
     |   s=create_table_node
     |   s=create_view_node
@@ -578,40 +574,42 @@ dyn_column_name_or_def returns [ColumnDef ret]
             SortOrder.getDefault()); }
     ;
 
-select_expression returns [SelectStatement ret]
-    :  SELECT s=select_node {$ret = s;}
-    ;
-    
 subquery_expression returns [ParseNode ret]
-    :  s=select_expression {$ret = factory.subquery(s, false);}
+    :  s=select_node {$ret = factory.subquery(s, false);}
     ;
     
-// Parse a full select expression structure.
-select_node returns [SelectStatement ret]
+single_select returns [SelectStatement ret]
 @init{ contextStack.push(new ParseContext()); }
-    :   (d=DISTINCT | ALL)? sel=select_list
+    :   SELECT (h=hintClause)? 
+        (d=DISTINCT | ALL)? sel=select_list
         FROM from=parseFrom
         (WHERE where=expression)?
         (GROUP BY group=group_by)?
         (HAVING having=expression)?
-        (ORDER BY order=order_by)?
-        (LIMIT l=limit)?
-        { ParseContext context = contextStack.peek(); $ret = factory.select(from, null, d!=null, sel, where, group, having, order, l, getBindCount(), context.isAggregate(), context.hasSequences()); }
+        { ParseContext context = contextStack.peek(); $ret = factory.select(from, h, d!=null, sel, where, group, having, null, null, getBindCount(), context.isAggregate(), context.hasSequences()); }
     ;
 finally{ contextStack.pop(); }
 
+unioned_selects returns [List<SelectStatement> ret]
+@init{ret = new ArrayList<SelectStatement>();}
+    :   s=single_select {ret.add(s);} (UNION ALL s=single_select {ret.add(s);})*
+    ;
+    
 // Parse a full select expression structure.
-hinted_select_node returns [SelectStatement ret]
-    :   (hint=hintClause)? 
-        s=select_node
-        { $ret = factory.select(s, hint); }
+select_node returns [SelectStatement ret]
+@init{ contextStack.push(new ParseContext()); }
+    :   u=unioned_selects
+        (ORDER BY order=order_by)?
+        (LIMIT l=limit)?
+        { ParseContext context = contextStack.peek(); $ret = factory.select(u, order, l, getBindCount(), context.isAggregate()); }
     ;
+finally{ contextStack.pop(); }
 
 // Parse a full upsert expression structure.
 upsert_node returns [UpsertStatement ret]
     :   UPSERT (hint=hintClause)? INTO t=from_table_name
         (LPAREN p=upsert_column_refs RPAREN)?
-        ((VALUES LPAREN v=one_or_more_expressions RPAREN) | s=select_expression)
+        ((VALUES LPAREN v=one_or_more_expressions RPAREN) | s=select_node)
         {ret = factory.upsert(factory.namedTable(null,t,p == null ? null : p.getFirst()), hint, p == null ? null : p.getSecond(), v, s, getBindCount()); }
     ;
 
@@ -697,7 +695,7 @@ table_factor returns [TableNode ret]
     :   LPAREN t=table_list RPAREN { $ret = t; }
     |   n=bind_name ((AS)? alias=identifier)? { $ret = factory.bindTable(alias, factory.table(null,n)); } // TODO: review
     |   f=from_table_name ((AS)? alias=identifier)? (LPAREN cdefs=dyn_column_defs RPAREN)? { $ret = factory.namedTable(alias,f,cdefs); }
-    |   LPAREN SELECT s=hinted_select_node RPAREN ((AS)? alias=identifier)? { $ret = factory.derivedTable(alias, s); }
+    |   LPAREN s=select_node RPAREN ((AS)? alias=identifier)? { $ret = factory.derivedTable(alias, s); }
     ;
 
 join_type returns [JoinTableNode.JoinType ret]

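A note on the grammar restructuring above, read as a sketch: select_node now
parses a chain of single_select arms joined by UNION ALL, followed by one
optional ORDER BY/LIMIT, so those clauses bind to the union as a whole.
Scoping them to a single arm requires a parenthesized derived table (the
revised table_factor rule accepts a full select_node). Table names below are
illustrative assumptions:

    // ORDER BY/LIMIT attach to the whole union (new select_node rule):
    String wholeUnion =
        "SELECT a_string, col1 FROM test_table " +
        "UNION ALL SELECT a_string, col1 FROM b_table " +
        "ORDER BY col1 LIMIT 2";

    // To order/limit just one arm, wrap it as a derived table:
    String oneArmLimited =
        "SELECT * FROM (SELECT a_string, col1 FROM test_table " +
        "ORDER BY col1 LIMIT 2) UNION ALL " +
        "SELECT a_string, col1 FROM b_table";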
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index a57250e..98a1108 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -65,6 +65,7 @@ import org.apache.phoenix.schema.PColumnImpl;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
@@ -72,7 +73,6 @@ import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
-import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
@@ -163,6 +163,8 @@ public class FromCompiler {
     public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection)
     		throws SQLException {
     	TableNode fromNode = statement.getFrom();
+    	if (fromNode == null)
+    	    return EMPTY_TABLE_RESOLVER;
         if (fromNode instanceof NamedTableNode)
             return new SingleTableColumnResolver(connection, (NamedTableNode) fromNode, true, 1);
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 2276f4e..f8177e6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.compile;
 
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
@@ -40,6 +41,7 @@ import org.apache.phoenix.execute.ScanPlan;
 import org.apache.phoenix.execute.SortMergeJoinPlan;
 import org.apache.phoenix.execute.TupleProjectionPlan;
 import org.apache.phoenix.execute.TupleProjector;
+import org.apache.phoenix.execute.UnionPlan;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.RowValueConstructorExpression;
@@ -65,6 +67,7 @@ import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.ScanUtil;
 
@@ -72,7 +75,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 
-
 /**
  *
  * Class used to build an executable query plan
@@ -109,10 +111,6 @@ public class QueryCompiler {
         this(statement, select, resolver, Collections.<PDatum>emptyList(), null, new SequenceManager(statement), projectTuples);
     }
 
-    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager) throws SQLException {
-        this(statement, select, resolver, targetColumns, parallelIteratorFactory, sequenceManager, true);
-    }
-    
     public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples) throws SQLException {
         this.statement = statement;
         this.select = select;
@@ -135,6 +133,10 @@ public class QueryCompiler {
         this.originalScan = ScanUtil.newScan(scan);
     }
 
+    public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager) throws SQLException {
+        this(statement, select, resolver, targetColumns, parallelIteratorFactory, sequenceManager, true);
+    }
+
     /**
      * Builds an executable query plan from a parsed SQL statement
      * @return executable query plan
@@ -146,7 +148,42 @@ public class QueryCompiler {
      * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
      */
     public QueryPlan compile() throws SQLException{
-        SelectStatement select = this.select;
+        QueryPlan plan;
+        if (select.isUnion()) {
+            plan = compileUnionAll(select);
+        } else {
+            plan = compileSelect(select);
+        }
+        return plan;
+    }
+
+    public QueryPlan compileUnionAll(SelectStatement select) throws SQLException { 
+        List<SelectStatement> unionAllSelects = select.getSelects();
+        List<QueryPlan> plans = new ArrayList<QueryPlan>();
+
+        for (int i=0; i < unionAllSelects.size(); i++ ) {
+            SelectStatement subSelect = unionAllSelects.get(i);
+            // Push down order-by and limit into sub-selects.
+            if (!select.getOrderBy().isEmpty() || select.getLimit() != null) {
+                subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), select.getLimit());
+            }
+            QueryPlan subPlan = compileSubquery(subSelect, true);
+            TupleProjector projector = new TupleProjector(subPlan.getProjector());
+            subPlan = new TupleProjectionPlan(subPlan, projector, null);
+            plans.add(subPlan);
+        }
+        UnionCompiler.checkProjectionNumAndTypes(plans);
+
+        TableRef tableRef = UnionCompiler.contructSchemaTable(statement, plans.get(0));
+        ColumnResolver resolver = FromCompiler.getResolver(tableRef);
+        StatementContext context = new StatementContext(statement, resolver, scan, sequenceManager);
+
+        QueryPlan plan = compileSingleFlatQuery(context, select, statement.getParameters(), false, false, null, null, false);
+        plan =  new UnionPlan(context, select, tableRef, plan.getProjector(), plan.getLimit(), plan.getOrderBy(), GroupBy.EMPTY_GROUP_BY, plans, null); 
+        return plan;
+    }
+
+    public QueryPlan compileSelect(SelectStatement select) throws SQLException{
         List<Object> binds = statement.getParameters();
         StatementContext context = new StatementContext(statement, resolver, scan, sequenceManager);
         if (select.isJoin()) {
@@ -161,7 +198,7 @@ public class QueryCompiler {
             return compileSingleQuery(context, select, binds, false, true);
         }
     }
-    
+
     /*
      * Call compileJoinQuery() for join queries recursively down to the leaf JoinTable nodes.
      * This matches the input JoinTable node against patterns in the following order:
@@ -207,7 +244,7 @@ public class QueryCompiler {
                 table.projectColumns(context.getScan());
                 return compileSingleQuery(context, subquery, binds, asSubquery, !asSubquery);
             }
-            QueryPlan plan = compileSubquery(subquery);
+            QueryPlan plan = compileSubquery(subquery, false);
             PTable projectedTable = table.createProjectedTable(plan.getProjector());
             context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable));
             return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), table.compilePostFilterExpression(context));
@@ -229,7 +266,7 @@ public class QueryCompiler {
                 tupleProjector = new TupleProjector(initialProjectedTable);
             } else {
                 SelectStatement subquery = table.getAsSubquery(orderBy);
-                QueryPlan plan = compileSubquery(subquery);
+                QueryPlan plan = compileSubquery(subquery, false);
                 initialProjectedTable = table.createProjectedTable(plan.getProjector());
                 tableRef = plan.getTableRef();
                 context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
@@ -309,7 +346,7 @@ public class QueryCompiler {
                 tupleProjector = new TupleProjector(rhsProjTable);
             } else {
                 SelectStatement subquery = rhsTable.getAsSubquery(orderBy);
-                QueryPlan plan = compileSubquery(subquery);
+                QueryPlan plan = compileSubquery(subquery, false);
                 rhsProjTable = rhsTable.createProjectedTable(plan.getProjector());
                 rhsTableRef = plan.getTableRef();
                 context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap());
@@ -425,7 +462,7 @@ public class QueryCompiler {
         return type == JoinType.Semi && complete;
     }
 
-    protected QueryPlan compileSubquery(SelectStatement subquery) throws SQLException {
+    protected QueryPlan compileSubquery(SelectStatement subquery, boolean pushDownMaxRows) throws SQLException {
         PhoenixConnection connection = this.statement.getConnection();
         subquery = SubselectRewriter.flatten(subquery, connection);
         ColumnResolver resolver = FromCompiler.getResolverForQuery(subquery, connection);
@@ -436,7 +473,7 @@ public class QueryCompiler {
             subquery = StatementNormalizer.normalize(transformedSubquery, resolver);
         }
         int maxRows = this.statement.getMaxRows();
-        this.statement.setMaxRows(0); // overwrite maxRows to avoid its impact on inner queries.
+        this.statement.setMaxRows(pushDownMaxRows ? maxRows : 0); // push maxRows down only when requested; otherwise clear it so it has no impact on inner queries.
         QueryPlan plan = new QueryCompiler(this.statement, subquery, resolver, false).compile();
         plan = statement.getConnection().getQueryServices().getOptimizer().optimize(statement, plan);
         this.statement.setMaxRows(maxRows); // restore maxRows.
@@ -449,7 +486,7 @@ public class QueryCompiler {
             return compileSingleFlatQuery(context, select, binds, asSubquery, allowPageFilter, null, null, true);
         }
 
-        QueryPlan innerPlan = compileSubquery(innerSelect);
+        QueryPlan innerPlan = compileSubquery(innerSelect, false);
         TupleProjector tupleProjector = new TupleProjector(innerPlan.getProjector());
         innerPlan = new TupleProjectionPlan(innerPlan, tupleProjector, null);
 
@@ -526,7 +563,7 @@ public class QueryCompiler {
             int i = 0;
             for (SubqueryParseNode subqueryNode : subqueries) {
                 SelectStatement stmt = subqueryNode.getSelectNode();
-                subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt), stmt, subqueryNode.expectSingleRow());
+                subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt, subqueryNode.expectSingleRow());
             }
             plan = HashJoinPlan.create(select, plan, null, subPlans);
         }

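One consequence of the new compileSubquery(..., pushDownMaxRows) flag,
sketched under the same assumed tables: compileUnionAll() passes true, so a
JDBC-level Statement.setMaxRows() is now pushed into each arm of a UNION ALL
plan, matching the limit plans asserted in testExplainUnionAll above.

    // Hedged sketch: with setMaxRows(2), each arm plans a per-scan
    // "SERVER 2 ROW LIMIT" in addition to the client-side limit over the
    // merged result ("conn" is an open Phoenix connection, assumed).
    Statement stmt = conn.createStatement();
    stmt.setMaxRows(2);
    ResultSet rs = stmt.executeQuery(
        "SELECT a_string, col1 FROM test_table UNION ALL " +
        "SELECT a_string, col1 FROM b_table");
    // At most two rows come back in total.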
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
index f6a6f7a..b9897b1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java
@@ -99,7 +99,7 @@ public class StatementNormalizer extends ParseNodeRewriter {
             if (selectNodes != normSelectNodes) {
                 statement = NODE_FACTORY.select(statement.getFrom(), statement.getHint(), statement.isDistinct(),
                         normSelectNodes, statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(),
-                        statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                        statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
             }
         }
         

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
index 805894f..6862802 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java
@@ -70,6 +70,8 @@ public class SubselectRewriter extends ParseNodeRewriter {
         while (from != null && from instanceof DerivedTableNode) {
             DerivedTableNode derivedTable = (DerivedTableNode) from;
             SelectStatement subselect = derivedTable.getSelect();
+            if (subselect.isUnion())
+                break;
             ColumnResolver resolver = FromCompiler.getResolverForQuery(subselect, connection);
             SubselectRewriter rewriter = new SubselectRewriter(resolver, subselect.getSelect(), derivedTable.getAlias());
             SelectStatement ret = rewriter.flatten(select, subselect);
@@ -202,7 +204,8 @@ public class SubselectRewriter extends ParseNodeRewriter {
             isAggregateRewrite = true;
         }
         
-        return NODE_FACTORY.select(subselect.getFrom(), hintRewrite, isDistinctRewrite, selectNodesRewrite, whereRewrite, groupByRewrite, havingRewrite, orderByRewrite, limitRewrite, select.getBindCount(), isAggregateRewrite, select.hasSequence());
+        return NODE_FACTORY.select(subselect.getFrom(), hintRewrite, isDistinctRewrite, selectNodesRewrite, whereRewrite, groupByRewrite, 
+            havingRewrite, orderByRewrite, limitRewrite, select.getBindCount(), isAggregateRewrite, select.hasSequence(), select.getSelects());
     }
     
     private SelectStatement applyPostFilters(SelectStatement statement, List<ParseNode> postFilters) throws SQLException {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
new file mode 100644
index 0000000..3f069ff
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PColumnImpl;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PDataType;
+
+public class UnionCompiler {
+    private static final PName UNION_FAMILY_NAME = PNameFactory.newName("unionFamilyName");
+    private static final PName UNION_SCHEMA_NAME = PNameFactory.newName("unionSchemaName");
+    private static final PName UNION_TABLE_NAME = PNameFactory.newName("unionTableName");
+
+    public static List<QueryPlan> checkProjectionNumAndTypes(List<QueryPlan> selectPlans) throws SQLException {
+        QueryPlan plan = selectPlans.get(0);
+        int columnCount = plan.getProjector().getColumnCount();
+        List<? extends ColumnProjector> projectors = plan.getProjector().getColumnProjectors();
+        List<PDataType> selectTypes = new ArrayList<PDataType>();
+        for (ColumnProjector pro : projectors) {
+            selectTypes.add(pro.getExpression().getDataType());
+        }
+
+        for (int i = 1;  i < selectPlans.size(); i++) {     
+            plan = selectPlans.get(i);
+            if (columnCount !=plan.getProjector().getColumnCount()) {
+                throw new SQLExceptionInfo.Builder(SQLExceptionCode.SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS).setMessage(".").build().buildException();
+            }
+            List<? extends ColumnProjector> pros =  plan.getProjector().getColumnProjectors();
+            for (int j = 0; j < columnCount; j++) {
+                PDataType type = pros.get(j).getExpression().getDataType();
+                if (!type.isCoercibleTo(selectTypes.get(j))) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS).setMessage(".").build().buildException();
+                }
+            }
+        }
+        return selectPlans;
+    }
+
+    public static TableRef contructSchemaTable(PhoenixStatement statement, QueryPlan plan) throws SQLException {
+        List<PColumn> projectedColumns = new ArrayList<PColumn>();
+        for (int i=0; i< plan.getProjector().getColumnCount(); i++) {
+            ColumnProjector colProj = plan.getProjector().getColumnProjector(i);
+            Expression sourceExpression = colProj.getExpression();
+            PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(colProj.getName()), UNION_FAMILY_NAME,
+                    sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
+                    i, sourceExpression.getSortOrder(), 500, null, false, sourceExpression.toString());
+            projectedColumns.add(projectedColumn);
+        }
+        Long scn = statement.getConnection().getSCN();
+        PTable tempTable = PTableImpl.makePTable(statement.getConnection().getTenantId(), UNION_SCHEMA_NAME, UNION_TABLE_NAME, 
+                PTableType.SUBQUERY, null, HConstants.LATEST_TIMESTAMP, scn == null ? HConstants.LATEST_TIMESTAMP : scn, null, null, projectedColumns, null, null, null,
+                        true, null, null, null, true, true, true, null, null, null);
+        TableRef tableRef = new TableRef(null, tempTable, 0, false);
+        return tableRef;
+    }
+}

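A hedged illustration of the two checks checkProjectionNumAndTypes() enforces,
tied to the error codes added in SQLExceptionCode below; the tables follow the
UnionAllIT pattern (a_string VARCHAR, col1 INTEGER) and are assumptions:

    // Differing column counts across arms -> error code 525
    // (SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS), as exercised by testSelectDiff:
    //   SELECT a_string, col1, col1 FROM test_table
    //   UNION ALL SELECT a_string, col1 FROM b_table
    //
    // Types not coercible to the first arm's projected types -> error code
    // 526 (SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS), e.g. a VARCHAR column
    // unioned with an INTEGER column:
    try {
        conn.createStatement().executeQuery(
            "SELECT a_string FROM test_table UNION ALL " +
            "SELECT col1 FROM b_table");
    } catch (SQLException e) {
        System.out.println(e.getErrorCode()); // 526, assuming no coercion
    }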
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 2eea53b..9c38348 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -160,6 +160,12 @@ public enum SQLExceptionCode {
      STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX(522, "42899", "Stateless expression not allowed in an index"),
 
      /** 
+      * Union All related errors
+      */
+     SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS(525, "42902", "A UNION ALL query must project the same number of SELECT columns in every query"),
+     SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS(526, "42903", "A UNION ALL query must project compatible SELECT column types in every query"),
+
+     /** 
      * HBase and Phoenix specific implementation defined sub-classes.
      * Column family related exceptions.
      * 
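
As a client-visible sketch of the new codes (the connection URL and table names are hypothetical, not part of the patch), a UNION ALL whose legs project different column counts should now surface error 525:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class UnionAllMismatch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Two columns on the left, one on the right: the column counts differ.
                conn.createStatement().executeQuery(
                        "SELECT k, v FROM T1 UNION ALL SELECT k FROM T2");
            } catch (SQLException e) {
                // Expected: error code 525, SQLState 42902 (SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS)
                System.out.println(e.getErrorCode() + " " + e.getSQLState());
            }
        }
    }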

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 617cc48..4f344b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -177,6 +177,7 @@ public class AggregatePlan extends BaseQueryPlan {
         }
         ParallelIterators parallelIterators = new ParallelIterators(this, null, wrapParallelIteratorFactory());
         splits = parallelIterators.getSplits();
+        scans = parallelIterators.getScans();
 
         AggregatingResultIterator aggResultIterator;
         // No need to merge sort for ungrouped aggregation

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
new file mode 100644
index 0000000..973f37e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import java.sql.ParameterMetaData;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.compile.ScanRanges;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.iterate.ConcatResultIterator;
+import org.apache.phoenix.iterate.LimitingResultIterator;
+import org.apache.phoenix.iterate.MergeSortTopNResultIterator;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.iterate.UnionResultIterators;
+import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.SQLCloseable;
+
+
+public class UnionPlan implements QueryPlan {
+    private static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K
+
+    private final TableRef tableRef;
+    private final FilterableStatement statement;
+    private final ParameterMetaData paramMetaData;
+    private final OrderBy orderBy;
+    private final StatementContext context;
+    private final Integer limit;
+    private final GroupBy groupBy;
+    private final RowProjector projector;
+    private final boolean isDegenerate;
+    private final List<QueryPlan> plans;
+    private UnionResultIterators iterators;
+
+    public UnionPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector,
+            Integer limit, OrderBy orderBy, GroupBy groupBy, List<QueryPlan> plans, ParameterMetaData paramMetaData) throws SQLException {
+        this.context = context;
+        this.statement = statement;
+        this.tableRef = table;
+        this.projector = projector;
+        this.limit = limit;
+        this.orderBy = orderBy;
+        this.groupBy = groupBy;
+        this.plans = plans;
+        this.paramMetaData = paramMetaData;
+        boolean isDegen = true;
+        for (QueryPlan plan : plans) {           
+            if (plan.getContext().getScanRanges() != ScanRanges.NOTHING) {
+                isDegen = false;
+                break;
+            } 
+        }
+        this.isDegenerate = isDegen;     
+    }
+
+    @Override
+    public boolean isDegenerate() {
+        return isDegenerate;
+    }
+
+    @Override
+    public List<KeyRange> getSplits() {
+        if (iterators == null)
+            return null;
+        return iterators.getSplits();
+    }
+
+    @Override
+    public List<List<Scan>> getScans() {
+        if (iterators == null)
+            return null;
+        return iterators.getScans();
+    }
+
+    @Override
+    public GroupBy getGroupBy() {
+        return groupBy;
+    }
+
+    @Override
+    public OrderBy getOrderBy() {
+        return orderBy;
+    }
+
+    @Override
+    public TableRef getTableRef() {
+        return tableRef;
+    }
+
+    @Override
+    public Integer getLimit() {
+        return limit;
+    }
+
+    @Override
+    public RowProjector getProjector() {
+        return projector;
+    }
+
+    @Override
+    public final ResultIterator iterator() throws SQLException {
+        return iterator(Collections.<SQLCloseable>emptyList());
+    }
+
+    public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies) throws SQLException {
+        this.iterators = new UnionResultIterators(plans);
+        ResultIterator scanner;      
+        boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
+
+        if (isOrdered) { // TopN
+            scanner = new MergeSortTopNResultIterator(iterators, limit, orderBy.getOrderByExpressions());
+        } else {
+            scanner = new ConcatResultIterator(iterators);
+            if (limit != null) {
+                scanner = new LimitingResultIterator(scanner, limit);
+            }          
+        }
+        return scanner;
+    }
+
+    @Override
+    public ExplainPlan getExplainPlan() throws SQLException {
+        List<String> steps = new ArrayList<String>();
+        steps.add("UNION ALL OVER " + this.plans.size() + " QUERIES");
+        ResultIterator iterator = iterator();
+        iterator.explain(steps);
+        // Indent the plan steps nested under the union, except the last client-side merge/concat step (if there is one)
+        int offset = !orderBy.getOrderByExpressions().isEmpty() || limit != null ? 1 : 0;
+        for (int i = 1; i < steps.size() - offset; i++) {
+            steps.set(i, "    " + steps.get(i));
+        }
+        return new ExplainPlan(steps);
+    }
+
+
+    @Override
+    public long getEstimatedSize() {
+        return DEFAULT_ESTIMATED_SIZE;
+    }
+
+    @Override
+    public ParameterMetaData getParameterMetaData() {
+        return paramMetaData;
+    }
+
+    @Override
+    public FilterableStatement getStatement() {
+        return statement;
+    }
+
+    @Override
+    public StatementContext getContext() {
+        return context;
+    }
+
+    @Override
+    public boolean isRowKeyOrdered() {
+        return groupBy.isEmpty() ? orderBy.getOrderByExpressions().isEmpty() : groupBy.isOrderPreserving();
+    }
+
+    public List<QueryPlan> getPlans() {
+        return this.plans;
+    }
+}
+
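
A quick usage sketch for getExplainPlan() above (assuming a java.sql.Connection conn and hypothetical tables T1 and T2): the first step is the union header, the child plans' steps follow indented four spaces, and a trailing client-side merge/concat step, when present, stays flush left:

    // EXPLAIN output sketch; the actual child steps depend on the tables involved.
    ResultSet rs = conn.createStatement().executeQuery(
            "EXPLAIN SELECT k FROM T1 UNION ALL SELECT k FROM T2 ORDER BY k LIMIT 10");
    while (rs.next()) {
        System.out.println(rs.getString(1)); // e.g. "UNION ALL OVER 2 QUERIES", then indented steps
    }
    rs.close();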

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
index 71259e0..87a6a62 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
@@ -37,15 +37,22 @@ import org.apache.phoenix.schema.tuple.Tuple;
 public class MergeSortTopNResultIterator extends MergeSortResultIterator {
 
     private final int limit;
+    private final boolean clientSideOnly;
     private int count = 0;
     private final List<OrderByExpression> orderByColumns;
     private final ImmutableBytesWritable ptr1 = new ImmutableBytesWritable();
     private final ImmutableBytesWritable ptr2 = new ImmutableBytesWritable();
     
-    public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit, List<OrderByExpression> orderByColumns) {
+    public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit,
+            List<OrderByExpression> orderByColumns, boolean clientSideOnly) {
         super(iterators);
         this.limit = limit == null ? -1 : limit;
         this.orderByColumns = orderByColumns;
+        this.clientSideOnly = clientSideOnly;
+    }
+
+    public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit, List<OrderByExpression> orderByColumns) {
+        this(iterators, limit, orderByColumns, false);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
new file mode 100644
index 0000000..b7c8b21
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.iterate;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.util.ServerUtil;
+
+import com.google.common.collect.Lists;
+
+
+/**
+ * ResultIterators implementation that combines the iterators, splits, and
+ * scans of the child plans of a UNION ALL query.
+ */
+public class UnionResultIterators implements ResultIterators {
+    private final List<KeyRange> splits;
+    private final List<List<Scan>> scans;
+    private final List<PeekingResultIterator> iterators;
+    private final List<QueryPlan> plans;
+
+    public UnionResultIterators(List<QueryPlan> plans) throws SQLException {
+        this.plans = plans;
+        int nPlans = plans.size();
+        iterators = Lists.newArrayListWithExpectedSize(nPlans);
+        splits = Lists.newArrayListWithExpectedSize(nPlans * 30); 
+        scans = Lists.newArrayListWithExpectedSize(nPlans * 10); 
+        for (QueryPlan plan : this.plans) {
+            iterators.add(LookAheadResultIterator.wrap(plan.iterator()));
+            splits.addAll(plan.getSplits()); 
+            scans.addAll(plan.getScans());
+        }
+    }
+
+    @Override
+    public List<KeyRange> getSplits() {
+        return splits;
+    }
+
+    @Override
+    public void close() throws SQLException {   
+        SQLException toThrow = null;
+        try {
+            if (iterators != null) {
+                for (int index=0; index < iterators.size(); index++) {
+                    PeekingResultIterator iterator = iterators.get(index);
+                    try {
+                        iterator.close();
+                    } catch (Exception e) {
+                        if (toThrow == null) {
+                            toThrow = ServerUtil.parseServerException(e);
+                        } else {
+                            toThrow.setNextException(ServerUtil.parseServerException(e));
+                        }
+                    }
+                }
+            }
+        } catch (Exception e) {
+            toThrow = ServerUtil.parseServerException(e);
+        } finally {
+            if (toThrow != null) {
+                throw toThrow;
+            }
+        }
+    }
+
+    @Override
+    public List<List<Scan>> getScans() {
+        return scans;
+    }
+
+    @Override
+    public int size() {
+        return scans.size();
+    }
+
+    @Override
+    public void explain(List<String> planSteps) {
+        for (int index=0; index < iterators.size(); index++) {
+            iterators.get(index).explain(planSteps);
+        }
+    }
+
+    @Override 
+    public List<PeekingResultIterator> getIterators() throws SQLException {    
+        return iterators;
+    }
+}
\ No newline at end of file
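
The close() implementation above deliberately keeps closing the remaining iterators after a failure, chaining later exceptions onto the first via SQLException.setNextException (which appends to the end of the chain). The same pattern in isolation, as a sketch with AutoCloseable standing in for PeekingResultIterator:

    import java.sql.SQLException;
    import java.util.List;

    final class CloseAll {
        // Close every resource; remember the first failure and chain the rest onto it.
        static void closeAll(List<? extends AutoCloseable> resources) throws SQLException {
            SQLException first = null;
            for (AutoCloseable r : resources) {
                try {
                    r.close();
                } catch (Exception e) {
                    SQLException se = (e instanceof SQLException) ? (SQLException) e : new SQLException(e);
                    if (first == null) {
                        first = se;                 // first failure becomes the thrown exception
                    } else {
                        first.setNextException(se); // later failures are appended to the chain
                    }
                }
            }
            if (first != null) {
                throw first;                        // callers can walk getNextException()
            }
        }
    }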

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index ee6b016..462e1f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -330,9 +330,15 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
     private static class ExecutableSelectStatement extends SelectStatement implements CompilableStatement {
         private ExecutableSelectStatement(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
                 List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
-            super(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence);
+            this(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence, Collections.<SelectStatement>emptyList());
         }
 
+        private ExecutableSelectStatement(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
+                List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate,
+                boolean hasSequence, List<SelectStatement> selects) {
+            super(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence, selects);
+        }
+        
         @SuppressWarnings("unchecked")
         @Override
         public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
@@ -486,7 +492,6 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
                 public boolean isRowKeyOrdered() {
                     return true;
                 }
-                
             };
         }
     }
@@ -894,12 +899,20 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
     protected static class ExecutableNodeFactory extends ParseNodeFactory {
         @Override
         public ExecutableSelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select,
-                                                ParseNode where, List<ParseNode> groupBy, ParseNode having,
-                                                List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
+                ParseNode where, List<ParseNode> groupBy, ParseNode having,
+                List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
+            return this.select(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence, 
+                Collections.<SelectStatement>emptyList());
+        }
+
+        @Override
+        public ExecutableSelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
+                List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate,
+                boolean hasSequence, List<SelectStatement> selects) {
             return new ExecutableSelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.<ParseNode>emptyList() : groupBy,
-                    having, orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence);
+                    having, orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence, selects);
         }
-        
+
         @Override
         public ExecutableUpsertStatement upsert(NamedTableNode table, HintNode hintNode, List<ColumnName> columns, List<ParseNode> values, SelectStatement select, int bindCount) {
             return new ExecutableUpsertStatement(table, hintNode, columns, values, select, bindCount);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 62db00a..5aba933 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.parse;
 
 import java.lang.reflect.Constructor;
 import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -611,6 +612,13 @@ public class ParseNodeFactory {
         return new OrderByNode(expression, nullsLast, orderAscending);
     }
 
+    public SelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
+            List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, 
+            boolean hasSequence, List<SelectStatement> selects) {
+
+        return new SelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.<ParseNode>emptyList() : groupBy, having,
+                orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence, selects);
+    } 
 
     public SelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
             List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
@@ -618,7 +626,7 @@ public class ParseNodeFactory {
         return new SelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.<ParseNode>emptyList() : groupBy, having,
                 orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence);
     }
-
+    
     public UpsertStatement upsert(NamedTableNode table, HintNode hint, List<ColumnName> columns, List<ParseNode> values, SelectStatement select, int bindCount) {
         return new UpsertStatement(table, hint, columns, values, select, bindCount);
     }
@@ -681,7 +689,7 @@ public class ParseNodeFactory {
     public SelectStatement select(SelectStatement statement, HintNode hint) {
         return hint == null || hint.isEmpty() ? statement : select(statement.getFrom(), hint, statement.isDistinct(), statement.getSelect(),
                 statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), statement.getLimit(),
-                statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, HintNode hint, ParseNode where) {
@@ -690,13 +698,36 @@ public class ParseNodeFactory {
                 statement.hasSequence());
     }
 
+    public SelectStatement select(SelectStatement statement, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate) {
+        return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
+            statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, limit,
+            bindCount, isAggregate || statement.isAggregate(), statement.hasSequence());
+
+    }
+
     public SelectStatement select(SelectStatement statement, LimitNode limit) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
-                statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), limit,
-                statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+            statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), limit,
+            statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+    }
+
+    public SelectStatement select(SelectStatement statement, List<OrderByNode> orderBy, LimitNode limit) {
+        return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
+            statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, limit,
+            statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+    }
+
+    public SelectStatement select(List<SelectStatement> statements, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate) {
+        if (statements.size() == 1)
+            return select(statements.get(0), orderBy, limit, bindCount, isAggregate);
+        
+        return select(null, HintNode.EMPTY_HINT_NODE, false, Lists.newArrayList(aliasedNode(null, wildcard())), 
+                null, null, null, orderBy, limit, bindCount, false, false, statements);
     }
 
     public SubqueryParseNode subquery(SelectStatement select, boolean expectSingleRow) {
+        if (select.isUnion()) 
+            throw new RuntimeException(new SQLFeatureNotSupportedException());
         return new SubqueryParseNode(select, expectSingleRow);
     }
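
One user-visible consequence of the subquery() guard above (table names hypothetical): a UNION ALL nested inside a subquery is now rejected at parse time with a RuntimeException wrapping SQLFeatureNotSupportedException, rather than being mis-planned:

    // Expected to fail: UNION ALL is not yet supported inside subqueries.
    conn.createStatement().executeQuery(
            "SELECT * FROM T1 WHERE k IN (SELECT k FROM T2 UNION ALL SELECT k FROM T3)");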
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
index c6514dc..4ce893d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java
@@ -58,7 +58,7 @@ public class ParseNodeRewriter extends TraverseAllParseNodeVisitor<ParseNode> {
     public static SelectStatement rewrite(SelectStatement statement, ParseNodeRewriter rewriter) throws SQLException {
         Map<String,ParseNode> aliasMap = rewriter.getAliasMap();
         TableNode from = statement.getFrom();
-        TableNode normFrom = from.accept(new TableNodeRewriter(rewriter));
+        TableNode normFrom = from == null ? null : from.accept(new TableNodeRewriter(rewriter));
         ParseNode where = statement.getWhere();
         ParseNode normWhere = where;
         if (where != null) {
@@ -150,7 +150,8 @@ public class ParseNodeRewriter extends TraverseAllParseNodeVisitor<ParseNode> {
         }
         return NODE_FACTORY.select(normFrom, statement.getHint(), statement.isDistinct(),
                 normSelectNodes, normWhere, normGroupByNodes, normHaving, normOrderByNodes,
-                statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(),
+                statement.getSelects());
     }
 
     private Map<String, ParseNode> getAliasMap() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
index 71cabd6..08cec87 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.parse;
 
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -81,6 +82,14 @@ public class SelectStatement implements FilterableStatement {
                 select.getOrderBy(), select.getLimit(), select.getBindCount(), select.isAggregate(), select.hasSequence());
     }
     
+    // Copy constructor for sub select statements in a union
+    public static SelectStatement create(SelectStatement select, 
+            List<OrderByNode> orderBy, LimitNode limit, boolean isAggregate) {
+        return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), 
+                select.getSelect(), select.getWhere(), select.getGroupBy(), select.getHaving(), 
+                orderBy, limit, select.getBindCount(), isAggregate, select.hasSequence());
+    }
+
     private final TableNode fromTable;
     private final HintNode hint;
     private final boolean isDistinct;
@@ -93,6 +102,7 @@ public class SelectStatement implements FilterableStatement {
     private final int bindCount;
     private final boolean isAggregate;
     private final boolean hasSequence;
+    private final List<SelectStatement> selects = new ArrayList<SelectStatement>();
     
     @Override
     public final String toString() {
@@ -205,7 +215,7 @@ public class SelectStatement implements FilterableStatement {
     
     protected SelectStatement(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select,
             ParseNode where, List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit,
-            int bindCount, boolean isAggregate, boolean hasSequence) {
+            int bindCount, boolean isAggregate, boolean hasSequence, List<SelectStatement> selects) {
         this.fromTable = from;
         this.hint = hint == null ? HintNode.EMPTY_HINT_NODE : hint;
         this.isDistinct = isDistinct;
@@ -218,6 +228,16 @@ public class SelectStatement implements FilterableStatement {
         this.bindCount = bindCount;
         this.isAggregate = isAggregate || groupBy.size() != countConstants(groupBy) || this.having != null;
         this.hasSequence = hasSequence;
+        if (!selects.isEmpty()) {
+            this.selects.addAll(selects);
+        }
+    }
+
+    public SelectStatement(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select,
+            ParseNode where, List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit,
+            int bindCount, boolean isAggregate, boolean hasSequence) {
+        this(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence,
+                Collections.<SelectStatement>emptyList());
     }
     
     @Override
@@ -298,4 +318,12 @@ public class SelectStatement implements FilterableStatement {
         
         return ((DerivedTableNode) fromTable).getSelect();
     }
+
+    public boolean isUnion() {
+        return !getSelects().isEmpty();
+    }
+
+    public List<SelectStatement> getSelects() {
+        return selects;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c50feca2/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index 182757f..21a63c7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -635,19 +635,6 @@ public class QueryParserTest {
     }
 
     @Test
-    public void testInvalidUpsertSelectHint() throws Exception {
-        String sql = (
-                (
-                        "upsert into t select /*+ NO_INDEX */ k from t where k in ( 1,2 )"));
-        try {
-            parseQuery(sql);
-            fail();
-        } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode());
-        }
-    }
-
-    @Test
     public void testTableNameStartsWithUnderscore() throws Exception {
         String sql = (
                 (


[32/50] [abbrv] phoenix git commit: PHOENIX-1071 Get the phoenix-spark integration tests running.

Posted by ma...@apache.org.
PHOENIX-1071 Get the phoenix-spark integration tests running.

Uses the BaseHBaseManagedTimeIT framework now for creating the
test cluster and setup/teardown.

Tested with Java 7u75 i386 on Ubuntu, and 7u40 x64 on OS X.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/623829db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/623829db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/623829db

Branch: refs/heads/calcite
Commit: 623829dbeb13933dd6fc38821b258db87cc402a8
Parents: ca4e212
Author: Josh Mahonin <jm...@gmail.com>
Authored: Tue Apr 7 22:33:17 2015 -0400
Committer: ravimagham <ra...@apache.org>
Committed: Thu Apr 9 01:37:31 2015 -0700

----------------------------------------------------------------------
 phoenix-spark/pom.xml                           |  20 +-
 phoenix-spark/src/it/resources/log4j.xml        |   8 +
 .../apache/phoenix/spark/PhoenixRDDTest.scala   | 333 -------------------
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 326 ++++++++++++++++++
 4 files changed, 347 insertions(+), 340 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/623829db/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 412f59a..abed37e 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -22,7 +22,12 @@
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
     </dependency>
-
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-core</artifactId>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
     <!-- Force import of Spark's servlet API for unit tests -->
     <dependency>
       <groupId>javax.servlet</groupId>
@@ -46,7 +51,7 @@
     <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
-      <version>2.2.2</version>
+      <version>2.2.4</version>
       <scope>test</scope>
     </dependency>
 
@@ -447,6 +452,8 @@
   </dependencies>
 
   <build>
+    <testSourceDirectory>src/it/scala</testSourceDirectory>
+    <testResources><testResource><directory>src/it/resources</directory></testResource></testResources>
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -500,9 +507,7 @@
               <goal>test</goal>
             </goals>
             <configuration>
-              <parallel>true</parallel>
-              <tagsToExclude>Integration-Test</tagsToExclude>
-              <argLine>-Xmx2g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
+              <skipTests>true</skipTests>
             </configuration>
           </execution>
           <execution>
@@ -512,8 +517,9 @@
               <goal>test</goal>
             </goals>
             <configuration>
-              <parallel>false</parallel>
-              <tagsToInclude>Integration-Test</tagsToInclude>
+              <parallel>true</parallel>
+              <tagsToExclude>Integration-Test</tagsToExclude>
+              <argLine>-Xmx1536m -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
             </configuration>
           </execution>
         </executions>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/623829db/phoenix-spark/src/it/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/log4j.xml b/phoenix-spark/src/it/resources/log4j.xml
index d4799da..58abece 100644
--- a/phoenix-spark/src/it/resources/log4j.xml
+++ b/phoenix-spark/src/it/resources/log4j.xml
@@ -26,6 +26,14 @@
     <level value="ERROR"/>
   </logger>
 
+  <logger name="org.spark-project.jetty">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="akka">
+    <level value="ERROR"/>
+  </logger>
+
   <logger name="BlockStateChange">
     <level value="ERROR"/>
   </logger>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/623829db/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
deleted file mode 100644
index 63cb6e4..0000000
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
- */
-package org.apache.phoenix.spark
-
-import java.sql.{Connection, DriverManager}
-import java.util.Date
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hbase.{HConstants, HBaseTestingUtility}
-import org.apache.phoenix.schema.ColumnNotFoundException
-import org.apache.phoenix.schema.types.PVarchar
-import org.apache.phoenix.util.ColumnInfo
-import org.apache.spark.sql.SQLContext
-import org.apache.spark.sql.types.{StringType, StructField}
-import org.apache.spark.{SparkConf, SparkContext}
-import org.joda.time.DateTime
-import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
-import org.apache.phoenix.spark._
-
-import scala.collection.mutable.ListBuffer
-
-class PhoenixRDDTest extends FunSuite with Matchers with BeforeAndAfterAll {
-  lazy val hbaseTestingUtility = {
-    new HBaseTestingUtility()
-  }
-
-  lazy val hbaseConfiguration = {
-    val conf = hbaseTestingUtility.getConfiguration
-
-    val quorum = conf.get("hbase.zookeeper.quorum")
-    val clientPort = conf.get("hbase.zookeeper.property.clientPort")
-    val znodeParent = conf.get("zookeeper.znode.parent")
-
-    // This is an odd one - the Zookeeper Quorum entry in the config is totally wrong. It's
-    // just reporting localhost.
-    conf.set(org.apache.hadoop.hbase.HConstants.ZOOKEEPER_QUORUM, s"$quorum:$clientPort:$znodeParent")
-
-    conf
-  }
-
-  lazy val quorumAddress = {
-    hbaseConfiguration.get("hbase.zookeeper.quorum")
-  }
-
-  lazy val zookeeperClientPort = {
-    hbaseConfiguration.get("hbase.zookeeper.property.clientPort")
-  }
-
-  lazy val zookeeperZnodeParent = {
-    hbaseConfiguration.get("zookeeper.znode.parent")
-  }
-
-  lazy val hbaseConnectionString = {
-    s"$quorumAddress:$zookeeperClientPort:$zookeeperZnodeParent"
-  }
-
-  var conn: Connection = _
-
-  override def beforeAll() {
-    hbaseTestingUtility.startMiniCluster()
-
-    conn = DriverManager.getConnection(s"jdbc:phoenix:$hbaseConnectionString")
-
-    conn.setAutoCommit(true)
-
-    // each SQL statement used to set up Phoenix must be on a single line. Yes, that
-    // can potentially make large lines.
-    val setupSqlSource = getClass.getClassLoader.getResourceAsStream("setup.sql")
-
-    val setupSql = scala.io.Source.fromInputStream(setupSqlSource).getLines()
-
-    for (sql <- setupSql) {
-      val stmt = conn.createStatement()
-
-      stmt.execute(sql)
-
-      stmt.close()
-    }
-
-    conn.commit()
-  }
-
-  override def afterAll() {
-    conn.close()
-    hbaseTestingUtility.shutdownMiniCluster()
-  }
-
-  val conf = new SparkConf().set("spark.ui.showConsoleProgress", "false")
-
-  val sc = new SparkContext("local[1]", "PhoenixSparkTest", conf)
-
-  def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
-    val query = "SELECT %s FROM \"%s\"" format(columns.map(f => "\"" + f + "\"").mkString(", "), table)
-
-    query + (predicate match {
-      case Some(p: String) => " WHERE " + p
-      case _ => ""
-    })
-  }
-
-  test("Can create valid SQL") {
-    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
-      conf = hbaseConfiguration)
-
-    rdd.buildSql("MyTable", Array("Foo", "Bar"), None) should
-      equal("SELECT \"Foo\", \"Bar\" FROM \"MyTable\"")
-  }
-
-  test("Can convert Phoenix schema") {
-    val phoenixSchema = List(
-      new ColumnInfo("varcharColumn", PVarchar.INSTANCE.getSqlType)
-    )
-
-    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
-      conf = hbaseConfiguration)
-
-    val catalystSchema = rdd.phoenixSchemaToCatalystSchema(phoenixSchema)
-
-    val expected = List(StructField("varcharColumn", StringType, nullable = true))
-
-    catalystSchema shouldEqual expected
-  }
-
-  test("Can create schema RDD and execute query") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
-
-    df1.registerTempTable("sql_table_1")
-
-    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
-      conf = hbaseConfiguration)
-
-    df2.registerTempTable("sql_table_2")
-
-    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 6L
-  }
-
-  test("Can create schema RDD and execute query on case sensitive table (no config)") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"), zkUrl = Some(hbaseConnectionString))
-
-    df1.registerTempTable("table3")
-
-    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 2L
-  }
-
-  test("Can create schema RDD and execute constrained query") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
-
-    df1.registerTempTable("sql_table_1")
-
-    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
-      predicate = Some("\"ID\" = 1"),
-      conf = hbaseConfiguration)
-
-    df2.registerTempTable("sql_table_2")
-
-    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 1L
-  }
-
-  test("Using a predicate referring to a non-existent column should fail") {
-    intercept[RuntimeException] {
-      val sqlContext = new SQLContext(sc)
-
-      val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
-        predicate = Some("foo = bar"),
-        conf = hbaseConfiguration)
-
-      df1.registerTempTable("table3")
-
-      val sqlRdd = sqlContext.sql("SELECT * FROM table3")
-
-      // we have to execute an action before the predicate failure can occur
-      val count = sqlRdd.count()
-    }.getCause shouldBe a [ColumnNotFoundException]
-  }
-
-  test("Can create schema RDD with predicate that will never match") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
-      predicate = Some("\"id\" = -1"),
-      conf = hbaseConfiguration)
-
-    df1.registerTempTable("table3")
-
-    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 0L
-  }
-
-  test("Can create schema RDD with complex predicate") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("DATE_PREDICATE_TEST_TABLE", Array("ID", "TIMESERIES_KEY"),
-      predicate = Some("ID > 0 AND TIMESERIES_KEY BETWEEN CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)"),
-      conf = hbaseConfiguration)
-    
-    df1.registerTempTable("date_predicate_test_table")
-
-    val sqlRdd = df1.sqlContext.sql("SELECT * FROM date_predicate_test_table")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 0L
-  }
-
-  test("Can query an array table") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("ARRAY_TEST_TABLE", Array("ID", "VCARRAY"),
-      conf = hbaseConfiguration)
-
-    df1.registerTempTable("ARRAY_TEST_TABLE")
-
-    val sqlRdd = sqlContext.sql("SELECT * FROM ARRAY_TEST_TABLE")
-
-    val count = sqlRdd.count()
-
-    // get row 0, column 1, which should be "VCARRAY"
-    val arrayValues = sqlRdd.collect().apply(0).apply(1)
-
-    arrayValues should equal(Array("String1", "String2", "String3"))
-
-    count shouldEqual 1L
-  }
-  
-  test("Can read a table as an RDD") {
-    val rdd1 = sc.phoenixTableAsRDD("ARRAY_TEST_TABLE", Seq("ID", "VCARRAY"),
-      conf = hbaseConfiguration)
-
-    val count = rdd1.count()
-
-    val arrayValues = rdd1.take(1)(0)("VCARRAY")
-
-    arrayValues should equal(Array("String1", "String2", "String3"))
-
-    count shouldEqual 1L
-  }
-
-  test("Can save to phoenix table") {
-    val sqlContext = new SQLContext(sc)
-
-    val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
-
-    sc
-      .parallelize(dataSet)
-      .saveToPhoenix(
-        "OUTPUT_TEST_TABLE",
-        Seq("ID","COL1","COL2"),
-        hbaseConfiguration
-      )
-
-    // Load the results back
-    val stmt = conn.createStatement()
-    val rs = stmt.executeQuery("SELECT ID, COL1, COL2 FROM OUTPUT_TEST_TABLE")
-    val results = ListBuffer[(Long, String, Int)]()
-    while(rs.next()) {
-      results.append((rs.getLong(1), rs.getString(2), rs.getInt(3)))
-    }
-    stmt.close()
-
-    // Verify they match
-    (0 to results.size - 1).foreach { i =>
-      dataSet(i) shouldEqual results(i)
-    }
-  }
-
-  test("Can save Java and Joda dates to Phoenix (no config)") {
-    val dt = new DateTime()
-    val date = new Date()
-
-    val dataSet = List((1L, "1", 1, dt), (2L, "2", 2, date))
-    sc
-      .parallelize(dataSet)
-      .saveToPhoenix(
-        "OUTPUT_TEST_TABLE",
-        Seq("ID","COL1","COL2","COL3"),
-        zkUrl = Some(hbaseConnectionString)
-      )
-
-    // Load the results back
-    val stmt = conn.createStatement()
-    val rs = stmt.executeQuery("SELECT COL3 FROM OUTPUT_TEST_TABLE WHERE ID = 1 OR ID = 2 ORDER BY ID ASC")
-    val results = ListBuffer[java.sql.Date]()
-    while(rs.next()) {
-      results.append(rs.getDate(1))
-    }
-    stmt.close()
-
-    // Verify the epochs are equal
-    results(0).getTime shouldEqual dt.getMillis
-    results(1).getTime shouldEqual date.getTime
-  }
-
-  test("Not specifying a zkUrl or a config quorum URL should fail") {
-    intercept[UnsupportedOperationException] {
-      val sqlContext = new SQLContext(sc)
-      val badConf = new Configuration(hbaseConfiguration)
-      badConf.unset(HConstants.ZOOKEEPER_QUORUM)
-      sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = badConf)
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/623829db/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
new file mode 100644
index 0000000..149baec
--- /dev/null
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -0,0 +1,326 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import java.sql.{Connection, DriverManager}
+import java.util.Date
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.{HConstants, HBaseTestingUtility}
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT
+import org.apache.phoenix.query.BaseTest
+import org.apache.phoenix.schema.ColumnNotFoundException
+import org.apache.phoenix.schema.types.PVarchar
+import org.apache.phoenix.util.ColumnInfo
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.types.{StringType, StructField}
+import org.apache.spark.{SparkConf, SparkContext}
+import org.joda.time.DateTime
+import org.scalatest._
+import org.apache.phoenix.spark._
+
+import scala.collection.mutable.ListBuffer
+
+/*
+  Note: If running directly from an IDE, these are the recommended VM parameters:
+  -Xmx1536m -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m
+ */
+
+// Helper object to access the protected static methods of BaseHBaseManagedTimeIT
+object PhoenixSparkITHelper extends BaseHBaseManagedTimeIT {
+  def getTestClusterConfig = BaseHBaseManagedTimeIT.getTestClusterConfig
+  def doSetup = BaseHBaseManagedTimeIT.doSetup()
+  def doTeardown = BaseHBaseManagedTimeIT.doTeardown()
+  def getUrl = BaseTest.getUrl
+}
+
+class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
+  var conn: Connection = _
+  var sc: SparkContext = _
+
+  lazy val hbaseConfiguration = {
+    val conf = PhoenixSparkITHelper.getTestClusterConfig
+    // The ZooKeeper quorum address in the config defaults to "localhost", which is incorrect; build the full quorum string here
+    val quorum = conf.get("hbase.zookeeper.quorum")
+    val clientPort = conf.get("hbase.zookeeper.property.clientPort")
+    val znodeParent = conf.get("zookeeper.znode.parent")
+    conf.set(HConstants.ZOOKEEPER_QUORUM, s"$quorum:$clientPort:$znodeParent")
+    conf
+  }
+
+  lazy val quorumAddress = {
+    hbaseConfiguration.get(HConstants.ZOOKEEPER_QUORUM)
+  }
+
+  override def beforeAll() {
+    PhoenixSparkITHelper.doSetup
+
+    conn = DriverManager.getConnection(PhoenixSparkITHelper.getUrl)
+    conn.setAutoCommit(true)
+
+    // Each SQL statement used to set up Phoenix must be on a single line,
+    // even if that makes for long lines.
+    val setupSqlSource = getClass.getClassLoader.getResourceAsStream("setup.sql")
+
+    val setupSql = scala.io.Source.fromInputStream(setupSqlSource).getLines()
+
+    for (sql <- setupSql) {
+      val stmt = conn.createStatement()
+      stmt.execute(sql)
+    }
+    conn.commit()
+
+    val conf = new SparkConf()
+      .setAppName("PhoenixSparkIT")
+      .setMaster("local[2]") // 2 threads, some parallelism
+      .set("spark.ui.showConsoleProgress", "false") // Disable printing stage progress
+
+    sc = new SparkContext(conf)
+  }
+
+  override def afterAll() {
+    conn.close()
+    sc.stop()
+    PhoenixSparkITHelper.doTeardown
+  }
+
+  def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
+    val query = "SELECT %s FROM \"%s\"" format(columns.map(f => "\"" + f + "\"").mkString(", "), table)
+
+    query + (predicate match {
+      case Some(p: String) => " WHERE " + p
+      case _ => ""
+    })
+  }
+
+  test("Can create valid SQL") {
+    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
+      conf = hbaseConfiguration)
+
+    rdd.buildSql("MyTable", Array("Foo", "Bar"), None) should
+      equal("SELECT \"Foo\", \"Bar\" FROM \"MyTable\"")
+  }
+
+  test("Can convert Phoenix schema") {
+    val phoenixSchema = List(
+      new ColumnInfo("varcharColumn", PVarchar.INSTANCE.getSqlType)
+    )
+
+    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
+      conf = hbaseConfiguration)
+
+    val catalystSchema = rdd.phoenixSchemaToCatalystSchema(phoenixSchema)
+
+    val expected = List(StructField("varcharColumn", StringType, nullable = true))
+
+    catalystSchema shouldEqual expected
+  }
+
+  test("Can create schema RDD and execute query") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+
+    df1.registerTempTable("sql_table_1")
+
+    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
+      conf = hbaseConfiguration)
+
+    df2.registerTempTable("sql_table_2")
+
+    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 6L
+  }
+
+  test("Can create schema RDD and execute query on case sensitive table (no config)") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"), zkUrl = Some(quorumAddress))
+
+    df1.registerTempTable("table3")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 2L
+  }
+
+  test("Can create schema RDD and execute constrained query") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+
+    df1.registerTempTable("sql_table_1")
+
+    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
+      predicate = Some("\"ID\" = 1"),
+      conf = hbaseConfiguration)
+
+    df2.registerTempTable("sql_table_2")
+
+    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 1L
+  }
+
+  test("Using a predicate referring to a non-existent column should fail") {
+    intercept[RuntimeException] {
+      val sqlContext = new SQLContext(sc)
+
+      val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+        predicate = Some("foo = bar"),
+        conf = hbaseConfiguration)
+
+      df1.registerTempTable("table3")
+
+      val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+      // we have to execute an action before the predicate failure can occur
+      val count = sqlRdd.count()
+    }.getCause shouldBe a [ColumnNotFoundException]
+  }
+
+  test("Can create schema RDD with predicate that will never match") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+      predicate = Some("\"id\" = -1"),
+      conf = hbaseConfiguration)
+
+    df1.registerTempTable("table3")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 0L
+  }
+
+  test("Can create schema RDD with complex predicate") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("DATE_PREDICATE_TEST_TABLE", Array("ID", "TIMESERIES_KEY"),
+      predicate = Some("ID > 0 AND TIMESERIES_KEY BETWEEN CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)"),
+      conf = hbaseConfiguration)
+    
+    df1.registerTempTable("date_predicate_test_table")
+
+    val sqlRdd = df1.sqlContext.sql("SELECT * FROM date_predicate_test_table")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 0L
+  }
+
+  test("Can query an array table") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("ARRAY_TEST_TABLE", Array("ID", "VCARRAY"),
+      conf = hbaseConfiguration)
+
+    df1.registerTempTable("ARRAY_TEST_TABLE")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM ARRAY_TEST_TABLE")
+
+    val count = sqlRdd.count()
+
+    // get row 0, column 1, which should be "VCARRAY"
+    val arrayValues = sqlRdd.collect().apply(0).apply(1)
+
+    arrayValues should equal(Array("String1", "String2", "String3"))
+
+    count shouldEqual 1L
+  }
+  
+  test("Can read a table as an RDD") {
+    val rdd1 = sc.phoenixTableAsRDD("ARRAY_TEST_TABLE", Seq("ID", "VCARRAY"),
+      conf = hbaseConfiguration)
+
+    val count = rdd1.count()
+
+    val arrayValues = rdd1.take(1)(0)("VCARRAY")
+
+    arrayValues should equal(Array("String1", "String2", "String3"))
+
+    count shouldEqual 1L
+  }
+
+  test("Can save to phoenix table") {
+    val sqlContext = new SQLContext(sc)
+
+    val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
+
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "OUTPUT_TEST_TABLE",
+        Seq("ID","COL1","COL2"),
+        hbaseConfiguration
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT ID, COL1, COL2 FROM OUTPUT_TEST_TABLE")
+    val results = ListBuffer[(Long, String, Int)]()
+    while(rs.next()) {
+      results.append((rs.getLong(1), rs.getString(2), rs.getInt(3)))
+    }
+
+    // Verify they match
+    (0 to results.size - 1).foreach { i =>
+      dataSet(i) shouldEqual results(i)
+    }
+  }
+
+  test("Can save Java and Joda dates to Phoenix (no config)") {
+    val dt = new DateTime()
+    val date = new Date()
+
+    val dataSet = List((1L, "1", 1, dt), (2L, "2", 2, date))
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "OUTPUT_TEST_TABLE",
+        Seq("ID","COL1","COL2","COL3"),
+        zkUrl = Some(quorumAddress)
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT COL3 FROM OUTPUT_TEST_TABLE WHERE ID = 1 OR ID = 2 ORDER BY ID ASC")
+    val results = ListBuffer[java.sql.Date]()
+    while(rs.next()) {
+      results.append(rs.getDate(1))
+    }
+
+    // Verify the epochs are equal
+    results(0).getTime shouldEqual dt.getMillis
+    results(1).getTime shouldEqual date.getTime
+  }
+
+  test("Not specifying a zkUrl or a config quorum URL should fail") {
+    intercept[UnsupportedOperationException] {
+      val sqlContext = new SQLContext(sc)
+      val badConf = new Configuration(hbaseConfiguration)
+      badConf.unset(HConstants.ZOOKEEPER_QUORUM)
+      sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = badConf)
+    }
+  }
+}


[06/50] [abbrv] phoenix git commit: PHOENIX-1457 Use high priority queue for metadata endpoint calls

Posted by ma...@apache.org.
PHOENIX-1457 Use high priority queue for metadata endpoint calls


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a7d7dfb5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a7d7dfb5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a7d7dfb5

Branch: refs/heads/calcite
Commit: a7d7dfb52622a586482030ae6904d0c53ed7a4af
Parents: b256fde
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Tue Mar 24 17:17:44 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Fri Mar 27 11:45:41 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/index/IndexHandlerIT.java   |  12 +-
 .../phoenix/end2end/index/IndexQosIT.java       | 243 -------------------
 .../apache/phoenix/rpc/PhoenixClientRpcIT.java  | 122 ++++++++++
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 235 ++++++++++++++++++
 .../TestPhoenixIndexRpcSchedulerFactory.java    |  64 +++++
 .../hbase/ipc/PhoenixIndexRpcScheduler.java     | 123 ----------
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 123 ++++++++++
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java   |  95 ++++++++
 .../controller/ClientRpcControllerFactory.java  |  60 +++++
 .../ipc/controller/IndexRpcController.java      |  51 ++++
 .../ipc/controller/MetadataRpcController.java   |  55 +++++
 .../controller/ServerRpcControllerFactory.java  |  62 +++++
 .../index/IndexQosRpcControllerFactory.java     |  82 -------
 .../ipc/PhoenixIndexRpcSchedulerFactory.java    |  90 -------
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   4 -
 .../org/apache/phoenix/query/QueryServices.java |   5 +-
 .../phoenix/query/QueryServicesOptions.java     |  12 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   7 -
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  16 +-
 .../PhoenixIndexRpcSchedulerFactoryTest.java    | 106 --------
 .../PhoenixRpcSchedulerFactoryTest.java         | 125 ++++++++++
 .../java/org/apache/phoenix/query/BaseTest.java |  12 +-
 22 files changed, 1023 insertions(+), 681 deletions(-)
----------------------------------------------------------------------
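
Taken together, these changes route Phoenix index writes and metadata endpoint calls onto dedicated server-side RPC queues instead of the general queue. A minimal wiring sketch (mirroring the integration-test setup further down; the class names are the ones added by this commit, nothing else is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    import org.apache.hadoop.hbase.regionserver.RSRpcServices;

    public class PhoenixRpcConfigSketch {
        public static void main(String[] args) {
            // Region server side: install the Phoenix scheduler factory and the
            // server-side controller factory, so index and metadata calls keep
            // their elevated priorities end to end.
            Configuration serverConf = HBaseConfiguration.create();
            serverConf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
                    "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory");
            serverConf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory");

            // Client side: stamp metadata endpoint calls with the metadata priority.
            Configuration clientConf = HBaseConfiguration.create();
            clientConf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    "org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory");
        }
    }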


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
index 1507d6b..20a780a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
 import org.apache.phoenix.hbase.index.TableName;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.junit.After;
@@ -53,11 +53,11 @@ public class IndexHandlerIT {
 
     public static class CountingIndexClientRpcFactory extends RpcControllerFactory {
 
-        private IndexQosRpcControllerFactory delegate;
+        private ServerRpcControllerFactory delegate;
 
         public CountingIndexClientRpcFactory(Configuration conf) {
             super(conf);
-            this.delegate = new IndexQosRpcControllerFactory(conf);
+            this.delegate = new ServerRpcControllerFactory(conf);
         }
 
         @Override
@@ -146,8 +146,8 @@ public class IndexHandlerIT {
         conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
             CountingIndexClientRpcFactory.class.getName());
         // and set the index table as the current table
-        conf.setStrings(IndexQosRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
-            TestTable.getTableNameString());
+//        conf.setStrings(PhoenixRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
+//            TestTable.getTableNameString());
         HTable table = new HTable(conf, TestTable.getTableName());
 
         // do a write to the table
@@ -159,7 +159,7 @@ public class IndexHandlerIT {
         // check the counts on the rpc controller
         assertEquals("Didn't get the expected number of index priority writes!", 1,
             (int) CountingIndexClientRpcController.priorityCounts
-                    .get(QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY));
+                    .get(QueryServicesOptions.DEFAULT_INDEX_PRIORITY));
 
         table.close();
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
deleted file mode 100644
index bab8f38..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
- */
-package org.apache.phoenix.end2end.index;
-
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
-import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
-import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
-import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
-import org.apache.hadoop.hbase.ipc.CallRunner;
-import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
-import org.apache.hadoop.hbase.ipc.PriorityFunction;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.RpcExecutor;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.jdbc.PhoenixTestDriver;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-
-@Category(NeedsOwnMiniClusterTest.class)
-public class IndexQosIT extends BaseTest {
-
-    private static final String SCHEMA_NAME = "S";
-    private static final String INDEX_TABLE_NAME = "I";
-    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
-    private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
-    private static final int NUM_SLAVES = 2;
-
-    private static String url;
-    private static PhoenixTestDriver driver;
-    private HBaseTestingUtility util;
-    private HBaseAdmin admin;
-    private Configuration conf;
-    private static RpcExecutor spyRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-queue", 30, 1, 300));
-
-    /**
-     * Factory that uses a spyed RpcExecutor
-     */
-    public static class TestPhoenixIndexRpcSchedulerFactory extends PhoenixIndexRpcSchedulerFactory {
-        @Override
-        public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
-            PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = (PhoenixIndexRpcScheduler)super.create(conf, priorityFunction, abortable);
-            phoenixIndexRpcScheduler.setExecutorForTesting(spyRpcExecutor);
-            return phoenixIndexRpcScheduler;
-        }
-    }
-
-    @Before
-    public void doSetup() throws Exception {
-        conf = HBaseConfiguration.create();
-        setUpConfigForMiniCluster(conf);
-        conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-                TestPhoenixIndexRpcSchedulerFactory.class.getName());
-        conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, IndexQosRpcControllerFactory.class.getName());
-        util = new HBaseTestingUtility(conf);
-        // start cluster with 2 region servers
-        util.startMiniCluster(NUM_SLAVES);
-        admin = util.getHBaseAdmin();
-        String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
-        url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
-                + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
-        driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        try {
-            destroyDriver(driver);
-            if (admin!=null) {
-            	admin.close();
-            }
-        } finally {
-            util.shutdownMiniCluster();
-        }
-    }
-    
-    @Test
-    public void testIndexWriteQos() throws Exception { 
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = driver.connect(url, props);
-
-        // create the table 
-        conn.createStatement().execute(
-                "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-
-        // create the index 
-        conn.createStatement().execute(
-                "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
-
-        byte[] dataTableName = Bytes.toBytes(DATA_TABLE_FULL_NAME);
-        byte[] indexTableName = Bytes.toBytes(INDEX_TABLE_FULL_NAME);
-        MiniHBaseCluster cluster = util.getHBaseCluster();
-        HMaster master = cluster.getMaster();
-        AssignmentManager am = master.getAssignmentManager();
-
-        // verify there is only a single region for data table
-        List<HRegionInfo> tableRegions = admin.getTableRegions(dataTableName);
-        assertEquals("Expected single region for " + dataTableName, tableRegions.size(), 1);
-        HRegionInfo dataHri = tableRegions.get(0);
-
-        // verify there is only a single region for index table
-        tableRegions = admin.getTableRegions(indexTableName);
-        HRegionInfo indexHri = tableRegions.get(0);
-        assertEquals("Expected single region for " + indexTableName, tableRegions.size(), 1);
-
-        ServerName dataServerName = am.getRegionStates().getRegionServerOfRegion(dataHri);
-        ServerName indexServerName = am.getRegionStates().getRegionServerOfRegion(indexHri);
-
-        // if data table and index table are on same region server, move the index table to the other region server
-        if (dataServerName.equals(indexServerName)) {
-            HRegionServer server1 = util.getHBaseCluster().getRegionServer(0);
-            HRegionServer server2 = util.getHBaseCluster().getRegionServer(1);
-            HRegionServer dstServer = null;
-            HRegionServer srcServer = null;
-            if (server1.getServerName().equals(indexServerName)) {
-                dstServer = server2;
-                srcServer = server1;
-            } else {
-                dstServer = server1;
-                srcServer = server2;
-            }
-            byte[] encodedRegionNameInBytes = indexHri.getEncodedNameAsBytes();
-            admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
-            while (dstServer.getOnlineRegion(indexHri.getRegionName()) == null
-                    || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
-                    || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
-                    || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
-                // wait for the move to be finished
-                Thread.sleep(1);
-            }
-        }
-
-        dataHri = admin.getTableRegions(dataTableName).get(0);
-        dataServerName = am.getRegionStates().getRegionServerOfRegion(dataHri);
-        indexHri = admin.getTableRegions(indexTableName).get(0);
-        indexServerName = am.getRegionStates().getRegionServerOfRegion(indexHri);
-
-        // verify index and data tables are on different servers
-        assertNotEquals("Index and Data table should be on different region servers dataServer " + dataServerName
-                + " indexServer " + indexServerName, dataServerName, indexServerName);
-
-        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "k1");
-        stmt.setString(2, "v1");
-        stmt.setString(3, "v2");
-        stmt.execute();
-        conn.commit();
-
-        // run select query that should use the index
-        String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
-        stmt = conn.prepareStatement(selectSql);
-        stmt.setString(1, "v1");
-
-        // verify that the query does a range scan on the index table
-        ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
-        assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
-
-        // verify that the correct results are returned
-        rs = stmt.executeQuery();
-        assertTrue(rs.next());
-        assertEquals("k1", rs.getString(1));
-        assertEquals("v2", rs.getString(2));
-        assertFalse(rs.next());
-        
-        // drop index table 
-        conn.createStatement().execute(
-                "DROP INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME );
-        // create a data table with the same name as the index table 
-        conn.createStatement().execute(
-                "CREATE TABLE " + INDEX_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-        
-        // upsert one row to the table (which has the same table name as the previous index table)
-        stmt = conn.prepareStatement("UPSERT INTO " + INDEX_TABLE_FULL_NAME + " VALUES(?,?,?)");
-        stmt.setString(1, "k1");
-        stmt.setString(2, "v1");
-        stmt.setString(3, "v2");
-        stmt.execute();
-        conn.commit();
-        
-        // run select query on the new table
-        selectSql = "SELECT k, v2 from " + INDEX_TABLE_FULL_NAME + " WHERE v1=?";
-        stmt = conn.prepareStatement(selectSql);
-        stmt.setString(1, "v1");
-
-        // verify that the correct results are returned
-        rs = stmt.executeQuery();
-        assertTrue(rs.next());
-        assertEquals("k1", rs.getString(1));
-        assertEquals("v2", rs.getString(2));
-        assertFalse(rs.next());
-        
-        // verify that that index queue is used only once (for the first upsert)
-        Mockito.verify(spyRpcExecutor).dispatch(Mockito.any(CallRunner.class));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
new file mode 100644
index 0000000..c079a30
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.rpc;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.google.common.collect.Maps;
+
+public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
+
+    private static final String SCHEMA_NAME = "S";
+    private static final String INDEX_TABLE_NAME = "I";
+    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
+
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+        serverProps.put(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
+        serverProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ServerRpcControllerFactory.class.getName());
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
+        clientProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ClientRpcControllerFactory.class.getName());
+        NUM_SLAVES_BASE = 2;
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet()
+                .iterator()));
+    }
+    
+    @AfterClass
+    public static void doTeardown() throws Exception {
+        TestPhoenixIndexRpcSchedulerFactory.reset();
+    }
+
+    @Test
+    public void testIndexQos() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            // create the table
+            conn.createStatement().execute(
+                    "CREATE TABLE " + DATA_TABLE_FULL_NAME
+                            + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true");
+
+            // create the index
+            conn.createStatement().execute(
+                    "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
+
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+            stmt.setString(1, "k1");
+            stmt.setString(2, "v1");
+            stmt.setString(3, "v2");
+            stmt.execute();
+            conn.commit();
+
+            // run select query that should use the index
+            String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
+            stmt = conn.prepareStatement(selectSql);
+            stmt.setString(1, "v1");
+
+            // verify that the query does a range scan on the index table
+            ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
+            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
+
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("k1", rs.getString(1));
+            assertEquals("v2", rs.getString(2));
+            assertFalse(rs.next());
+
+            // verify that the index queue is not used (since the index writes originate from a client and not a region server)
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor(), Mockito.never()).dispatch(Mockito.any(CallRunner.class));
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testMetadataQos() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            // create the table
+            conn.createStatement().execute("CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
+            // verify that the metadata queue is used at least once
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor(), Mockito.atLeastOnce()).dispatch(Mockito.any(CallRunner.class));
+        } finally {
+            conn.close();
+        }
+    }
+
+}
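
The queue-usage assertions above depend on Mockito spies that TestPhoenixIndexRpcSchedulerFactory (shown below) injects as the scheduler's executors. A stripped-down sketch of that pattern; the queue name and sizing are arbitrary placeholders:

    import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
    import org.apache.hadoop.hbase.ipc.CallRunner;
    import org.apache.hadoop.hbase.ipc.RpcExecutor;
    import org.mockito.Mockito;

    public class SpyExecutorSketch {
        public static void main(String[] args) throws Exception {
            // Wrap a real executor in a spy: calls still execute normally, but
            // every dispatch() invocation is recorded and can be asserted on.
            RpcExecutor spy = Mockito.spy(
                    new BalancedQueueRpcExecutor("sketch-queue", 30, 1, 300));
            // ... hand `spy` to the scheduler under test and run some RPCs ...
            Mockito.verify(spy, Mockito.never()).dispatch(Mockito.any(CallRunner.class));
        }
    }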

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
new file mode 100644
index 0000000..de0ab84
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.rpc;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.google.common.collect.Maps;
+
+public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
+
+    private static final String SCHEMA_NAME = "S";
+    private static final String INDEX_TABLE_NAME = "I";
+    private static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
+    private static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
+    
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
+        serverProps.put(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
+        serverProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ServerRpcControllerFactory.class.getName());
+        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
+        clientProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, RpcControllerFactory.class.getName());
+        NUM_SLAVES_BASE = 2;
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
+    }
+    
+    @AfterClass
+    public static void doTeardown() throws Exception {
+        TestPhoenixIndexRpcSchedulerFactory.reset();
+    }
+    
+    @Test
+    public void testIndexQos() throws Exception { 
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+            // create the table 
+            conn.createStatement().execute(
+                    "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+    
+            // create the index 
+            conn.createStatement().execute(
+                    "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
+
+            ensureTablesOnDifferentRegionServers(DATA_TABLE_FULL_NAME, INDEX_TABLE_FULL_NAME);
+    
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
+            stmt.setString(1, "k1");
+            stmt.setString(2, "v1");
+            stmt.setString(3, "v2");
+            stmt.execute();
+            conn.commit();
+    
+            // run select query that should use the index
+            String selectSql = "SELECT k, v2 from " + DATA_TABLE_FULL_NAME + " WHERE v1=?";
+            stmt = conn.prepareStatement(selectSql);
+            stmt.setString(1, "v1");
+    
+            // verify that the query does a range scan on the index table
+            ResultSet rs = stmt.executeQuery("EXPLAIN " + selectSql);
+            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER S.I ['v1']", QueryUtil.getExplainPlan(rs));
+    
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("k1", rs.getString(1));
+            assertEquals("v2", rs.getString(2));
+            assertFalse(rs.next());
+            
+            // drop index table 
+            conn.createStatement().execute(
+                    "DROP INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME );
+            // create a data table with the same name as the index table 
+            conn.createStatement().execute(
+                    "CREATE TABLE " + INDEX_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+            
+            // upsert one row to the table (which has the same table name as the previous index table)
+            stmt = conn.prepareStatement("UPSERT INTO " + INDEX_TABLE_FULL_NAME + " VALUES(?,?,?)");
+            stmt.setString(1, "k1");
+            stmt.setString(2, "v1");
+            stmt.setString(3, "v2");
+            stmt.execute();
+            conn.commit();
+            
+            // run select query on the new table
+            selectSql = "SELECT k, v2 from " + INDEX_TABLE_FULL_NAME + " WHERE v1=?";
+            stmt = conn.prepareStatement(selectSql);
+            stmt.setString(1, "v1");
+    
+            // verify that the correct results are returned
+            rs = stmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals("k1", rs.getString(1));
+            assertEquals("v2", rs.getString(2));
+            assertFalse(rs.next());
+            
+            // verify that the index queue is used only once (for the first upsert)
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
+        }
+        finally {
+            conn.close();
+        }
+    }
+
+	/**
+	 * Verifies that the given tables each have a single region and are on
+	 * different region servers. If they are on the same server moves tableName2
+	 * to the other region server.
+	 */
+	private void ensureTablesOnDifferentRegionServers(String tableName1, String tableName2) throws Exception  {
+		byte[] table1 = Bytes.toBytes(tableName1);
+		byte[] table2 = Bytes.toBytes(tableName2);
+		HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
+		HBaseTestingUtility util = getUtility();
+		MiniHBaseCluster cluster = util.getHBaseCluster();
+		HMaster master = cluster.getMaster();
+		AssignmentManager am = master.getAssignmentManager();
+   
+		// verify there is only a single region for data table
+		List<HRegionInfo> tableRegions = admin.getTableRegions(table1);
+		assertEquals("Expected single region for " + tableName1, 1, tableRegions.size());
+		HRegionInfo hri1 = tableRegions.get(0);
+   
+		// verify there is only a single region for index table
+		tableRegions = admin.getTableRegions(table2);
+		HRegionInfo hri2 = tableRegions.get(0);
+		assertEquals("Expected single region for " + tableName2, 1, tableRegions.size());
+   
+		ServerName serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
+		ServerName serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
+   
+		// if data table and index table are on same region server, move the index table to the other region server
+		if (serverName1.equals(serverName2)) {
+		    HRegionServer server1 = util.getHBaseCluster().getRegionServer(0);
+		    HRegionServer server2 = util.getHBaseCluster().getRegionServer(1);
+		    HRegionServer dstServer = null;
+		    HRegionServer srcServer = null;
+		    if (server1.getServerName().equals(serverName2)) {
+		        dstServer = server2;
+		        srcServer = server1;
+		    } else {
+		        dstServer = server1;
+		        srcServer = server2;
+		    }
+		    byte[] encodedRegionNameInBytes = hri2.getEncodedNameAsBytes();
+		    admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
+		    while (dstServer.getOnlineRegion(hri2.getRegionName()) == null
+		            || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+		            || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+		            || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
+		        // wait for the move to be finished
+		        Thread.sleep(1);
+		    }
+		}
+   
+		hri1 = admin.getTableRegions(table1).get(0);
+		serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
+		hri2 = admin.getTableRegions(table2).get(0);
+		serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
+
+		// verify index and data tables are on different servers
+		assertNotEquals("Tables " + tableName1 + " and " + tableName2 + " should be on different region servers", serverName1, serverName2);
+	}
+    
+    @Test
+    public void testMetadataQos() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = driver.connect(getUrl(), props);
+        try {
+        	ensureTablesOnDifferentRegionServers(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
+            // create the table 
+            conn.createStatement().execute(
+                    "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
+            // query the table from another connection, so that SYSTEM.STATS will be used 
+            conn.createStatement().execute("SELECT * FROM "+DATA_TABLE_FULL_NAME);
+            // verify that the metadata queue is used once
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
+        }
+        finally {
+            conn.close();
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
new file mode 100644
index 0000000..fb29985
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.rpc;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RpcExecutor;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.mockito.Mockito;
+
+public class TestPhoenixIndexRpcSchedulerFactory extends PhoenixRpcSchedulerFactory {
+    
+    private static RpcExecutor indexRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-index-queue", 30, 1,
+            300));
+    private static RpcExecutor metadataRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-metadata-queue", 30,
+            1, 300));
+
+    @Override
+    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
+        PhoenixRpcScheduler phoenixIndexRpcScheduler = (PhoenixRpcScheduler)super.create(conf, priorityFunction, abortable);
+        phoenixIndexRpcScheduler.setIndexExecutorForTesting(indexRpcExecutor);
+        phoenixIndexRpcScheduler.setMetadataExecutorForTesting(metadataRpcExecutor);
+        return phoenixIndexRpcScheduler;
+    }
+    
+    @Override
+    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
+        return create(configuration, priorityFunction, null);
+    }
+    
+    public static RpcExecutor getIndexRpcExecutor() {
+        return indexRpcExecutor;
+    }
+    
+    public static RpcExecutor getMetadataRpcExecutor() {
+        return metadataRpcExecutor;
+    }
+    
+    public static void reset() {
+        Mockito.reset(metadataRpcExecutor);
+        Mockito.reset(indexRpcExecutor);
+    }
+}
+
+

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
deleted file mode 100644
index 4709304..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcScheduler.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.ipc;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * {@link RpcScheduler} that first checks to see if this is an index update before passing off the
- * call to the delegate {@link RpcScheduler}.
- * <p>
- * We reserve the range (1000, 1050], by default (though it is configurable), for index priority
- * writes. Currently, we don't do any prioritization within that range - all index writes are
- * treated with the same priority and put into the same queue.
- */
-public class PhoenixIndexRpcScheduler extends RpcScheduler {
-
-    // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
-    public static final String CALL_QUEUE_READ_SHARE_CONF_KEY = "ipc.server.callqueue.read.share";
-    public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY =
-            "ipc.server.callqueue.handler.factor";
-    private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
-
-    private RpcScheduler delegate;
-    private int minPriority;
-    private int maxPriority;
-    private RpcExecutor callExecutor;
-    private int port;
-
-    public PhoenixIndexRpcScheduler(int indexHandlerCount, Configuration conf,
-            RpcScheduler delegate, int minPriority, int maxPriority) {
-        int maxQueueLength =
-                conf.getInt("ipc.server.max.callqueue.length", indexHandlerCount
-                        * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
-
-        // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
-        float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
-        int numCallQueues =
-                Math.max(1, Math.round(indexHandlerCount * callQueuesHandlersFactor));
-
-        this.minPriority = minPriority;
-        this.maxPriority = maxPriority;
-        this.delegate = delegate;
-
-        this.callExecutor =
-                new BalancedQueueRpcExecutor("Index", indexHandlerCount, numCallQueues,
-                        maxQueueLength);
-    }
-
-    @Override
-    public void init(Context context) {
-        delegate.init(context);
-        this.port = context.getListenerAddress().getPort();
-    }
-
-    @Override
-    public void start() {
-        delegate.start();
-        callExecutor.start(port);
-    }
-
-    @Override
-    public void stop() {
-        delegate.stop();
-        callExecutor.stop();
-    }
-
-    @Override
-    public void dispatch(CallRunner callTask) throws InterruptedException, IOException {
-        RpcServer.Call call = callTask.getCall();
-        int priority = call.header.getPriority();
-        if (minPriority <= priority && priority < maxPriority) {
-            callExecutor.dispatch(callTask);
-        } else {
-            delegate.dispatch(callTask);
-        }
-    }
-
-    @Override
-    public int getGeneralQueueLength() {
-        // not the best way to calculate, but don't have a better way to hook
-        // into metrics at the moment
-        return this.delegate.getGeneralQueueLength() + this.callExecutor.getQueueLength();
-    }
-
-    @Override
-    public int getPriorityQueueLength() {
-        return this.delegate.getPriorityQueueLength();
-    }
-
-    @Override
-    public int getReplicationQueueLength() {
-        return this.delegate.getReplicationQueueLength();
-    }
-
-    @Override
-    public int getActiveRpcHandlerCount() {
-        return this.delegate.getActiveRpcHandlerCount() + this.callExecutor.getActiveHandlerCount();
-    }
-
-    @VisibleForTesting
-    public void setExecutorForTesting(RpcExecutor executor) {
-        this.callExecutor = executor;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
new file mode 100644
index 0000000..e721271
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * {@link RpcScheduler} that first checks to see if this is an index or metadata update before passing off the
+ * call to the delegate {@link RpcScheduler}.
+ */
+public class PhoenixRpcScheduler extends RpcScheduler {
+
+    // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
+    private static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = "ipc.server.callqueue.handler.factor";
+    private static final String CALLQUEUE_LENGTH_CONF_KEY = "ipc.server.max.callqueue.length";
+    private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
+
+    private RpcScheduler delegate;
+    private int indexPriority;
+    private int metadataPriority;
+    private RpcExecutor indexCallExecutor;
+    private RpcExecutor metadataCallExecutor;
+    private int port;
+
+    public PhoenixRpcScheduler(Configuration conf, RpcScheduler delegate, int indexPriority, int metadataPriority) {
+        // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
+        int maxQueueLength =  conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
+        float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
+        int numQueues = Math.max(1, Math.round(callQueuesHandlersFactor));
+
+        this.indexPriority = indexPriority;
+        this.metadataPriority = metadataPriority;
+        this.delegate = delegate;
+        this.indexCallExecutor = new BalancedQueueRpcExecutor("Index", 1, numQueues, maxQueueLength);
+        this.metadataCallExecutor = new BalancedQueueRpcExecutor("Metadata", 1, numQueues, maxQueueLength);
+    }
+
+    @Override
+    public void init(Context context) {
+        delegate.init(context);
+        this.port = context.getListenerAddress().getPort();
+    }
+
+    @Override
+    public void start() {
+        delegate.start();
+        indexCallExecutor.start(port);
+        metadataCallExecutor.start(port);
+    }
+
+    @Override
+    public void stop() {
+        delegate.stop();
+        indexCallExecutor.stop();
+        metadataCallExecutor.stop();
+    }
+
+    @Override
+    public void dispatch(CallRunner callTask) throws InterruptedException, IOException {
+        RpcServer.Call call = callTask.getCall();
+        int priority = call.header.getPriority();
+        if (indexPriority == priority) {
+            indexCallExecutor.dispatch(callTask);
+        } else if (metadataPriority == priority) {
+            metadataCallExecutor.dispatch(callTask);
+        } else {
+            delegate.dispatch(callTask);
+        }
+    }
+
+    @Override
+    public int getGeneralQueueLength() {
+        // not the best way to calculate, but don't have a better way to hook
+        // into metrics at the moment
+        return this.delegate.getGeneralQueueLength() + this.indexCallExecutor.getQueueLength() + this.metadataCallExecutor.getQueueLength();
+    }
+
+    @Override
+    public int getPriorityQueueLength() {
+        return this.delegate.getPriorityQueueLength();
+    }
+
+    @Override
+    public int getReplicationQueueLength() {
+        return this.delegate.getReplicationQueueLength();
+    }
+
+    @Override
+    public int getActiveRpcHandlerCount() {
+        return this.delegate.getActiveRpcHandlerCount() + this.indexCallExecutor.getActiveHandlerCount() + this.metadataCallExecutor.getActiveHandlerCount();
+    }
+
+    @VisibleForTesting
+    public void setIndexExecutorForTesting(RpcExecutor executor) {
+        this.indexCallExecutor = executor;
+    }
+    
+    @VisibleForTesting
+    public void setMetadataExecutorForTesting(RpcExecutor executor) {
+        this.metadataCallExecutor = executor;
+    }
+    
+    
+}
\ No newline at end of file
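
A short usage sketch for the scheduler above. The priority values 1000 and 2000 are made up for illustration; in practice PhoenixRpcSchedulerFactory (next file) reads the real values from the QueryServices configuration, and the delegate is the scheduler built by SimpleRpcSchedulerFactory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.PhoenixRpcScheduler;
    import org.apache.hadoop.hbase.ipc.RpcScheduler;

    public class SchedulerRoutingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            RpcScheduler delegate = null; // placeholder; normally from SimpleRpcSchedulerFactory
            // Calls whose header priority equals 1000 go to the "Index" executor,
            // 2000 to the "Metadata" executor, and everything else to the delegate.
            RpcScheduler scheduler = new PhoenixRpcScheduler(conf, delegate, 1000, 2000);
        }
    }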

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
new file mode 100644
index 0000000..a697382
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
+import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Factory to create a {@link PhoenixRpcScheduler}. In this package so we can access the
+ * {@link SimpleRpcSchedulerFactory}.
+ */
+public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixRpcSchedulerFactory.class);
+
+    private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
+            "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
+
+    @Override
+    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
+        // create the delegate scheduler
+        RpcScheduler delegate;
+        try {
+            // throws IllegalAccessError on <=0.98.4, where SimpleRpcSchedulerFactory is not visible
+            delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
+        } catch (IllegalAccessError e) {
+            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
+            throw e;
+        }
+
+        // get the index priority configs
+        int indexPriority = getIndexPriority(conf);
+        validatePriority(indexPriority);
+        // get the metadata priority configs
+        int metadataPriority = getMetadataPriority(conf);
+        validatePriority(metadataPriority);
+
+        // validate index and metadata priorities are not the same
+        Preconditions.checkArgument(indexPriority != metadataPriority, "Index and Metadata priority must not be the same: " + indexPriority);
+        LOG.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
+
+        PhoenixRpcScheduler scheduler =
+                new PhoenixRpcScheduler(conf, delegate, indexPriority, metadataPriority);
+        return scheduler;
+    }
+
+    @Override
+    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
+        return create(configuration, priorityFunction, null);
+    }
+
+    /**
+     * Validates that the given priority does not overlap with the HBase priority range
+     */
+    private void validatePriority(int priority) {
+        Preconditions.checkArgument(priority < HConstants.NORMAL_QOS || priority > HConstants.HIGH_QOS,
+                "priority cannot be within the HBase priority range " + HConstants.NORMAL_QOS + " to " + HConstants.HIGH_QOS);
+    }
+
+    public static int getIndexPriority(Configuration conf) {
+        return conf.getInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_PRIORITY);
+    }
+    
+    public static int getMetadataPriority(Configuration conf) {
+        return conf.getInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_METADATA_PRIORITY);
+    }
+    
+}
\ No newline at end of file
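
The static getters make the configured priorities available to the RPC controllers as well. A small sketch of the lookup and of the constraints create() enforces:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;

    public class PrioritySketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            int indexPriority = PhoenixRpcSchedulerFactory.getIndexPriority(conf);
            int metadataPriority = PhoenixRpcSchedulerFactory.getMetadataPriority(conf);
            // create(...) rejects a priority inside HBase's own QoS range
            // [HConstants.NORMAL_QOS, HConstants.HIGH_QOS], and rejects the pair
            // if the two values are equal.
            System.out.println(indexPriority + " / " + metadataPriority);
        }
    }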

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
new file mode 100644
index 0000000..5a7dcc2
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+/**
+ * {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed
+ * in its own queue.
+ */
+public class ClientRpcControllerFactory extends RpcControllerFactory {
+
+    public ClientRpcControllerFactory(Configuration conf) {
+        super(conf);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController() {
+        PayloadCarryingRpcController delegate = super.newController();
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
+        PayloadCarryingRpcController delegate = super.newController(cellScanner);
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables) {
+        PayloadCarryingRpcController delegate = super.newController(cellIterables);
+        return getController(delegate);
+    }
+    
+    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate) {
+        return new MetadataRpcController(delegate, conf);
+    }
+    
+}
\ No newline at end of file
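
The client-side counterpart is a one-line configuration change; a sketch, assuming the stock HBase constant (the sketch class name is made up). The QueryServicesOptions change further down applies this same setting by default for Phoenix connections:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;

    public class ClientControllerSetupSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Client RPCs against Phoenix SYSTEM tables get the metadata priority;
            // everything else keeps HBase's standard handling.
            conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    ClientRpcControllerFactory.class.getName());
        }
    }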

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
new file mode 100644
index 0000000..fdb1d33
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+class IndexRpcController extends DelegatingPayloadCarryingRpcController {
+
+    private final int priority;
+    private final String tracingTableName;
+    
+    public IndexRpcController(PayloadCarryingRpcController delegate, Configuration conf) {
+        super(delegate);
+        this.priority = PhoenixRpcSchedulerFactory.getIndexPriority(conf);
+        this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
+                QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+    }
+    
+    @Override
+    public void setPriority(final TableName tn) {
+        if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) {
+            setPriority(this.priority);
+        }
+        else {
+            super.setPriority(tn);
+        }
+    }
+    
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
new file mode 100644
index 0000000..23b9f03
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+
+import com.google.common.collect.ImmutableList;
+
+class MetadataRpcController extends DelegatingPayloadCarryingRpcController {
+
+	private int priority;
+	// list of system tables
+	private static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>()
+			.add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)
+			.add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
+			.add(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME).build();
+
+	public MetadataRpcController(PayloadCarryingRpcController delegate,
+			Configuration conf) {
+		super(delegate);
+		this.priority = PhoenixRpcSchedulerFactory.getMetadataPriority(conf);
+	}
+
+	@Override
+	public void setPriority(final TableName tn) {
+		if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) {
+			setPriority(this.priority);
+		} else {
+			super.setPriority(tn);
+		}
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
new file mode 100644
index 0000000..8c17eda
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc.controller;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+
+/**
+ * {@link RpcControllerFactory} that sets the priority of index and metadata rpc calls
+ * so that they are each processed in their own queues.
+ */
+public class ServerRpcControllerFactory extends RpcControllerFactory {
+
+    public ServerRpcControllerFactory(Configuration conf) {
+        super(conf);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController() {
+        PayloadCarryingRpcController delegate = super.newController();
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
+        PayloadCarryingRpcController delegate = super.newController(cellScanner);
+        return getController(delegate);
+    }
+
+    @Override
+    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables) {
+        PayloadCarryingRpcController delegate = super.newController(cellIterables);
+        return getController(delegate);
+    }
+    
+    private PayloadCarryingRpcController getController(PayloadCarryingRpcController delegate) {
+        // construct a chain of controllers: metadata, index and standard controller
+        IndexRpcController indexRpcController = new IndexRpcController(delegate, conf);
+        return new MetadataRpcController(indexRpcController, conf);
+    }
+    
+}
\ No newline at end of file
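
To make the chaining in getController concrete: a setPriority(TableName) call passes through MetadataRpcController first (Phoenix SYSTEM tables get the metadata priority), then IndexRpcController (any other non-system, non-tracing table gets the index priority), and only then reaches the delegate. A small sketch of the observable behavior, assuming the defaults from this patch (2000 for metadata, 1000 for index; the sketch class name is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
    import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;

    public class ControllerChainSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            PayloadCarryingRpcController controller =
                    new ServerRpcControllerFactory(conf).newController();
            // Matched by MetadataRpcController's SYSTEM_TABLE_NAMES -> metadata priority (2000).
            controller.setPriority(TableName.valueOf("SYSTEM.CATALOG"));
            // Falls through to IndexRpcController -> index priority (1000). Note this catches
            // every ordinary non-system, non-tracing table, which is why clients are pointed
            // at ClientRpcControllerFactory rather than this server-side factory.
            controller.setPriority(TableName.valueOf("MY_TABLE"));
        }
    }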

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
deleted file mode 100644
index a192feb..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexQosRpcControllerFactory.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index;
-
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.util.SchemaUtil;
-
-/**
- * {@link RpcControllerFactory} that overrides the standard {@link PayloadCarryingRpcController} to
- * allow the configured index tables (via {@link #INDEX_TABLE_NAMES_KEY}) to use the Index priority.
- */
-public class IndexQosRpcControllerFactory extends RpcControllerFactory {
-
-    public static final String INDEX_TABLE_NAMES_KEY = "phoenix.index.rpc.controller.index-tables";
-
-    public IndexQosRpcControllerFactory(Configuration conf) {
-        super(conf);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController() {
-        PayloadCarryingRpcController delegate = super.newController();
-        return new IndexQosRpcController(delegate, conf);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController(CellScanner cellScanner) {
-        PayloadCarryingRpcController delegate = super.newController(cellScanner);
-        return new IndexQosRpcController(delegate, conf);
-    }
-
-    @Override
-    public PayloadCarryingRpcController newController(List<CellScannable> cellIterables) {
-        PayloadCarryingRpcController delegate = super.newController(cellIterables);
-        return new IndexQosRpcController(delegate, conf);
-    }
-
-    private class IndexQosRpcController extends DelegatingPayloadCarryingRpcController {
-
-        private int priority;
-
-        public IndexQosRpcController(PayloadCarryingRpcController delegate, Configuration conf) {
-            super(delegate);
-            this.priority = PhoenixIndexRpcSchedulerFactory.getMinPriority(conf);
-        }
-        @Override
-        public void setPriority(final TableName tn) {
-            // if its an index table, then we override to the index priority
-            if (!tn.isSystemTable() &&  !SchemaUtil.isSystemDataTable(tn.getNameAsString())) {
-                setPriority(this.priority);
-            } 
-            else {
-                super.setPriority(tn);
-            }
-        }
-
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
deleted file mode 100644
index 1789b0e..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/ipc/PhoenixIndexRpcSchedulerFactory.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.ipc;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
-import org.apache.hadoop.hbase.ipc.PriorityFunction;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
-import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Factory to create a {@link PhoenixIndexRpcScheduler}. In this package so we can access the
- * {@link SimpleRpcSchedulerFactory}.
- */
-public class PhoenixIndexRpcSchedulerFactory implements RpcSchedulerFactory {
-
-    private static final Log LOG = LogFactory.getLog(PhoenixIndexRpcSchedulerFactory.class);
-
-    private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
-            "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
-
-    @Override
-    public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
-        // create the delegate scheduler
-        RpcScheduler delegate;
-        try {
-            // happens in <=0.98.4 where the scheduler factory is not visible
-            delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable);
-        } catch (IllegalAccessError e) {
-            LOG.fatal(VERSION_TOO_OLD_FOR_INDEX_RPC);
-            throw e;
-        }
-
-        int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT);
-        int minPriority = getMinPriority(conf);
-        int maxPriority = conf.getInt(QueryServices.MAX_INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MAX_PRIORITY);
-        // make sure the ranges are outside the warning ranges
-        Preconditions.checkArgument(maxPriority > minPriority, "Max index priority (" + maxPriority
-                + ") must be larger than min priority (" + minPriority + ")");
-        boolean allSmaller =
-                minPriority < HConstants.REPLICATION_QOS
-                        && maxPriority < HConstants.REPLICATION_QOS;
-        boolean allLarger = minPriority > HConstants.HIGH_QOS;
-        Preconditions.checkArgument(allSmaller || allLarger, "Index priority range (" + minPriority
-                + ",  " + maxPriority + ") must be outside HBase priority range ("
-                + HConstants.REPLICATION_QOS + ", " + HConstants.HIGH_QOS + ")");
-
-        LOG.info("Using custom Phoenix Index RPC Handling with " + indexHandlerCount
-                + " handlers and priority range [" + minPriority + ", " + maxPriority + ")");
-
-        PhoenixIndexRpcScheduler scheduler =
-                new PhoenixIndexRpcScheduler(indexHandlerCount, conf, delegate, minPriority,
-                        maxPriority);
-        return scheduler;
-    }
-
-    @Override
-    public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) {
-        return create(configuration, priorityFunction, null);
-    }
-
-    public static int getMinPriority(Configuration conf) {
-        return conf.getInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 15bcfd0..1b8b57d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -279,10 +279,6 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     /** Version below which we fall back on the generic KeyValueBuilder */
     public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14");
     
-    // list of system tables
-    public static final List<String> SYSTEM_TABLE_NAMES = new ImmutableList.Builder<String>().add(SYSTEM_CATALOG_NAME)
-            .add(SYSTEM_STATS_NAME).add(SEQUENCE_FULLNAME).build();
-    
     PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException {
         this.emptyResultSet = new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new PhoenixStatement(connection));
         this.connection = connection;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 2eab5dd..65f6acf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -123,9 +123,8 @@ public interface QueryServices extends SQLCloseable {
     // Index will be partially re-built from index disable time stamp - following overlap time
     public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB =
         "phoenix.index.failure.handling.rebuild.overlap.time";
-    public static final String MIN_INDEX_PRIOIRTY_ATTRIB = "phoenix.regionserver.index.priority.min";
-    public static final String MAX_INDEX_PRIOIRTY_ATTRIB = "phoenix.regionserver.index.priority.max";
-    public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.regionserver.index.handler.count";
+    public static final String INDEX_PRIOIRTY_ATTRIB = "phoenix.index.rpc.priority";
+    public static final String METADATA_PRIOIRTY_ATTRIB = "phoenix.metadata.rpc.priority";
     public static final String ALLOW_LOCAL_INDEX_ATTRIB = "phoenix.index.allowLocalIndex";
 
     // Config parameters for for configuring tracing

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 8cd740a..97040d2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -41,6 +41,7 @@ import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_TIME_TO_LI
 import static org.apache.phoenix.query.QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.METRICS_ENABLED;
 import static org.apache.phoenix.query.QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK;
@@ -61,12 +62,13 @@ import static org.apache.phoenix.query.QueryServices.STATS_USE_CURRENT_TIME_ATTR
 import static org.apache.phoenix.query.QueryServices.THREAD_POOL_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.THREAD_TIMEOUT_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB;
-import static org.apache.phoenix.query.QueryServices.METRICS_ENABLED;
 
 import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.trace.util.Tracing;
@@ -138,13 +140,12 @@ public class QueryServicesOptions {
     public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 10000; // 10 secs
     public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 300000; // 5 mins
 
-    public static final int DEFAULT_INDEX_MAX_PRIORITY = 1050;
     /**
      * HConstants#HIGH_QOS is the max we will see to a standard table. We go higher to differentiate
      * and give some room for things in the middle
      */
-    public static final int DEFAULT_INDEX_MIN_PRIORITY = 1000;
-    public static final int DEFAULT_INDEX_HANDLER_COUNT = 30;
+    public static final int DEFAULT_INDEX_PRIORITY = 1000;
+    public static final int DEFAULT_METADATA_PRIORITY = 2000;
     public static final boolean DEFAULT_ALLOW_LOCAL_INDEX = true;
 
     public static final int DEFAULT_TRACING_PAGE_SIZE = 100;
@@ -235,7 +236,8 @@ public class QueryServicesOptions {
             .setIfUnset(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE)
             .setIfUnset(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK)
             .setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK)
-            .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED);
+            .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED)
+            .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ClientRpcControllerFactory.class.getName());
             ;
         // HBase sets this to 1, so we reset it to something more appropriate.
         // Hopefully HBase will change this, because we can't know if a user set
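
Because withDefaults() now opts every Phoenix client into ClientRpcControllerFactory, reverting to the stock HBase controller becomes an explicit override. A sketch (the PhoenixServerRpcIT change in PHOENIX-1795 further down does exactly this to keep client traffic out of the Phoenix queues):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

    public class StockControllerOverrideSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Explicitly restore HBase's default controller factory.
            conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    RpcControllerFactory.class.getName());
        }
    }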

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 4a8341d..46da726 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -404,13 +404,6 @@ public class SchemaUtil {
         return false;
     }
     
-    /**
-     * Returns true if the given table is a system table (does not include future system indexes)
-     */
-    public static boolean isSystemDataTable(String fullTableName) {
-    	return PhoenixDatabaseMetaData.SYSTEM_TABLE_NAMES.contains(fullTableName);
-    }
-
     // Given the splits and the rowKeySchema, find out the keys that 
     public static byte[][] processSplits(byte[][] splits, LinkedHashSet<PColumn> pkColumns, Integer saltBucketNum, boolean defaultRowKeyOrder) throws SQLException {
         // FIXME: shouldn't this return if splits.length == 0?

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7d7dfb5/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
index 8bd8c11..12f1863 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
@@ -44,9 +44,9 @@ public class PhoenixIndexRpcSchedulerTest {
     public void testIndexPriorityWritesToIndexHandler() throws Exception {
         RpcScheduler mock = Mockito.mock(RpcScheduler.class);
 
-        PhoenixIndexRpcScheduler scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 200, 250);
+        PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250);
         BalancedQueueRpcExecutor executor = new BalancedQueueRpcExecutor("test-queue", 1, 1, 1);
-        scheduler.setExecutorForTesting(executor);
+        scheduler.setIndexExecutorForTesting(executor);
         dispatchCallWithPriority(scheduler, 200);
         List<BlockingQueue<CallRunner>> queues = executor.getQueues();
         assertEquals(1, queues.size());
@@ -54,8 +54,8 @@ public class PhoenixIndexRpcSchedulerTest {
         queue.poll(20, TimeUnit.SECONDS);
 
         // try again, this time we tweak the ranges we support
-        scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 101, 110);
-        scheduler.setExecutorForTesting(executor);
+        scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110);
+        scheduler.setIndexExecutorForTesting(executor);
         dispatchCallWithPriority(scheduler, 101);
         queue.poll(20, TimeUnit.SECONDS);
 
@@ -71,14 +71,14 @@ public class PhoenixIndexRpcSchedulerTest {
     @Test
     public void testDelegateWhenOutsideRange() throws Exception {
         RpcScheduler mock = Mockito.mock(RpcScheduler.class);
-        PhoenixIndexRpcScheduler scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 200, 250);
+        PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250);
         dispatchCallWithPriority(scheduler, 100);
-        dispatchCallWithPriority(scheduler, 250);
+        dispatchCallWithPriority(scheduler, 251);
 
         // try again, this time we tweak the ranges we support
-        scheduler = new PhoenixIndexRpcScheduler(10, conf, mock, 101, 110);
+        scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110);
         dispatchCallWithPriority(scheduler, 200);
-        dispatchCallWithPriority(scheduler, 110);
+        dispatchCallWithPriority(scheduler, 111);
 
         Mockito.verify(mock, Mockito.times(4)).init(Mockito.any(Context.class));
         Mockito.verify(mock, Mockito.times(4)).dispatch(Mockito.any(CallRunner.class));


[33/50] [abbrv] phoenix git commit: PHOENIX-1764 - Pherf ClassCastException

Posted by ma...@apache.org.
PHOENIX-1764 - Pherf ClassCastException


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6b1818c0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6b1818c0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6b1818c0

Branch: refs/heads/calcite
Commit: 6b1818c09233d508105256af311d59d6ab7b06ed
Parents: 623829d
Author: Cody Marcel <co...@apache.org>
Authored: Fri Apr 10 16:03:31 2015 -0700
Committer: Cody Marcel <co...@apache.org>
Committed: Fri Apr 10 16:20:16 2015 -0700

----------------------------------------------------------------------
 phoenix-pherf/cluster/pherf.sh                                     | 2 +-
 .../org/apache/phoenix/pherf/configuration/XMLConfigParser.java    | 2 +-
 phoenix-pherf/standalone/pherf.sh                                  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b1818c0/phoenix-pherf/cluster/pherf.sh
----------------------------------------------------------------------
diff --git a/phoenix-pherf/cluster/pherf.sh b/phoenix-pherf/cluster/pherf.sh
index 5b02fcb..aeff856 100755
--- a/phoenix-pherf/cluster/pherf.sh
+++ b/phoenix-pherf/cluster/pherf.sh
@@ -28,6 +28,6 @@ for f in $PHERF_HOME/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
-CMD=time $JAVA_HOME/bin/java $REMOTE_DEBUG -Dapp.home=$PHERF_HOME $ENV_PROPS -Xms512m -Xmx3072m -cp $CLASSPATH org.apache.phoenix.pherf.Pherf "$@"
+CMD="time ${JAVA_HOME}/bin/java ${REMOTE_DEBUG} -Dapp.home=${PHERF_HOME} ${ENV_PROPS} -Xms512m -Xmx3072m -cp ${CLASSPATH} org.apache.phoenix.pherf.Pherf ${@}"
 
 eval $CMD
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b1818c0/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
index e9255c6..a288e1b 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
@@ -65,7 +65,7 @@ public class XMLConfigParser {
             return scenarios;
         }
 
-        scenarios = (List<Scenario>) Collections.synchronizedCollection(new ArrayList<Scenario>());
+        scenarios = Collections.synchronizedList(new ArrayList<Scenario>());
         for (Path path : getPaths(getFilePattern())) {
             try {
                 List<Scenario> scenarioList = XMLConfigParser.readDataModel(path).getScenarios();
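
The root cause is easy to trip over: Collections.synchronizedCollection returns a Collection wrapper that does not implement List, so the old cast failed at runtime with the reported ClassCastException. A minimal reproduction (class name illustrative):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    public class SynchronizedListSketch {
        public static void main(String[] args) {
            Collection<String> wrapped = Collections.synchronizedCollection(new ArrayList<String>());
            // List<String> bad = (List<String>) wrapped; // would throw ClassCastException

            // synchronizedList preserves the List interface, so no cast is needed.
            List<String> good = Collections.synchronizedList(new ArrayList<String>());
            good.add("ok");
            System.out.println(good);
        }
    }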

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b1818c0/phoenix-pherf/standalone/pherf.sh
----------------------------------------------------------------------
diff --git a/phoenix-pherf/standalone/pherf.sh b/phoenix-pherf/standalone/pherf.sh
index 483830c..e08035a 100755
--- a/phoenix-pherf/standalone/pherf.sh
+++ b/phoenix-pherf/standalone/pherf.sh
@@ -24,5 +24,5 @@ for f in $PHERF_HOME/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
-CMD=time $JAVA_HOME/bin/java $REMOTE_DEBUG -Dapp.home=$PHERF_HOME $ENV_PROPS -Xms512m -Xmx3072m -cp $CLASSPATH org.apache.phoenix.pherf.Pherf "$@"
+CMD="time ${JAVA_HOME}/bin/java ${REMOTE_DEBUG} -Dapp.home=${PHERF_HOME} ${ENV_PROPS} -Xms512m -Xmx3072m -cp ${CLASSPATH} org.apache.phoenix.pherf.Pherf ${@}"
 eval $CMD
\ No newline at end of file


[16/50] [abbrv] phoenix git commit: PHOENIX-1712 Add INSTR function

Posted by ma...@apache.org.
PHOENIX-1712 Add INSTR function

Add method for detecting a substring within another string.

Signed-off-by: Gabriel Reid <ga...@ngdata.com>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1f942b1f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1f942b1f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1f942b1f

Branch: refs/heads/calcite
Commit: 1f942b1f0e815674f1917c18167d848769435148
Parents: f766a78
Author: NAVEEN MADHIRE <vm...@indiana.edu>
Authored: Mon Mar 16 23:11:45 2015 -0400
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu Apr 2 21:07:55 2015 +0200

----------------------------------------------------------------------
 .../apache/phoenix/end2end/InstrFunctionIT.java | 126 +++++++++++++++++++
 .../phoenix/expression/ExpressionType.java      |   4 +-
 .../expression/function/InstrFunction.java      | 105 ++++++++++++++++
 .../expression/function/InstrFunctionTest.java  | 108 ++++++++++++++++
 4 files changed, 342 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
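
Worth noting from the tests in this patch: this initial version of INSTR returns the zero-based offset of the substring (it is backed by java.lang.String#indexOf), and -1 when there is no match. A usage sketch against the SAMPLE table from InstrFunctionIT (the JDBC URL is illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class InstrUsageSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT INSTR(name, 'fgh') FROM SAMPLE");
            while (rs.next()) {
                // Prints 5 for name = 'abcdefghijkl'; would be -1 if 'fgh' were absent.
                System.out.println(rs.getInt(1));
            }
            conn.close();
        }
    }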


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f942b1f/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
new file mode 100644
index 0000000..57c0661
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.junit.Test;
+
+public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
+    private void initTable(Connection conn, String sortOrder, String s, String subStr) throws Exception {
+        String ddl = "CREATE TABLE SAMPLE (name VARCHAR NOT NULL PRIMARY KEY " + sortOrder + ", substr VARCHAR)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO SAMPLE VALUES(?,?)";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.setString(1, s);
+        stmt.setString(2, subStr);
+        stmt.execute();
+        conn.commit();        
+    }
+    
+    private void testInstr(Connection conn, String queryToExecute, Integer expValue) throws Exception {
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery(queryToExecute);
+        assertTrue(rs.next());
+        assertEquals(expValue.intValue(), rs.getInt(1));
+        assertFalse(rs.next());
+        
+    }
+    
+    private void testInstrFilter(Connection conn, String queryToExecute, String expected) throws Exception {
+        ResultSet rs;
+        PreparedStatement stmt = conn.prepareStatement(queryToExecute);
+        rs = stmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(expected, rs.getString(1));
+        
+    }
+
+    @Test
+    public void testSingleByteInstrAscending() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "ASC", "abcdefghijkl","fgh");
+        String queryToExecute = "SELECT INSTR(name, 'fgh') FROM SAMPLE";
+        testInstr(conn, queryToExecute, 5);
+    }
+    
+    @Test
+    public void testSingleByteInstrDescending() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "DESC", "abcdefghijkl","fgh");
+        String queryToExecute = "SELECT INSTR(name, 'fgh') FROM SAMPLE";
+        testInstr(conn, queryToExecute, 5);
+    }
+    
+    @Test
+    public void testSingleByteInstrAscendingNoString() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "ASC", "abcde fghijkl","lmn");
+        String queryToExecute = "SELECT INSTR(name, 'lmn') FROM SAMPLE";
+        testInstr(conn, queryToExecute, -1);
+    }
+    
+    @Test
+    public void testSingleByteInstrDescendingNoString() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "DESC", "abcde fghijkl","lmn");
+        String queryToExecute = "SELECT INSTR(name, 'lmn') FROM SAMPLE";
+        testInstr(conn, queryToExecute, -1);
+    }
+
+    @Test
+    public void testMultiByteInstrAscending() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "ASC", "AɚɦFGH","ɚɦ");
+        String queryToExecute = "SELECT INSTR(name, 'ɚɦ') FROM SAMPLE";
+        testInstr(conn, queryToExecute, 1);
+    }
+    
+    @Test
+    public void testMultiByteInstrDecending() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "DESC", "AɚɦFGH","ɚɦ");
+        String queryToExecute = "SELECT INSTR(name, 'ɚɦ') FROM SAMPLE";
+        testInstr(conn, queryToExecute, 1);
+    } 
+
+    @Test
+    public void testByteInstrAscendingFilter() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "ASC", "abcdefghijkl","fgh");
+        String queryToExecute = "select NAME from sample where instr(name, 'fgh') > 0";
+        testInstrFilter(conn, queryToExecute,"abcdefghijkl");
+    }
+    
+    
+    @Test
+    public void testByteInstrDecendingFilter() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTable(conn, "DESC", "abcdefghijkl","fgh");
+        String queryToExecute = "select NAME from sample where instr(name, 'fgh') > 0";
+        testInstrFilter(conn, queryToExecute,"abcdefghijkl");
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f942b1f/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index a7f8b4f..c25b1cc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -42,6 +42,7 @@ import org.apache.phoenix.expression.function.FloorDecimalExpression;
 import org.apache.phoenix.expression.function.FloorFunction;
 import org.apache.phoenix.expression.function.HourFunction;
 import org.apache.phoenix.expression.function.IndexStateNameFunction;
+import org.apache.phoenix.expression.function.InstrFunction;
 import org.apache.phoenix.expression.function.InvertFunction;
 import org.apache.phoenix.expression.function.LTrimFunction;
 import org.apache.phoenix.expression.function.LastValueFunction;
@@ -205,7 +206,8 @@ public enum ExpressionType {
     SecondFunction(SecondFunction.class),
     WeekFunction(WeekFunction.class),
     HourFunction(HourFunction.class),
-    NowFunction(NowFunction.class)
+    NowFunction(NowFunction.class),
+    InstrFunction(InstrFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f942b1f/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
new file mode 100644
index 0000000..317d4b3
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+
+@BuiltInFunction(name=InstrFunction.NAME, args={
+        @Argument(allowedTypes={ PVarchar.class }),
+        @Argument(allowedTypes={ PVarchar.class })})
+public class InstrFunction extends ScalarFunction{
+    
+    public static final String NAME = "INSTR";
+    
+    private String strToSearch = null;
+    
+    public InstrFunction() { }
+    
+    public InstrFunction(List<Expression> children) {
+        super(children);
+        init();
+    }
+    
+    private void init() {
+        Expression strToSearchExpression = getChildren().get(1);
+        if (strToSearchExpression instanceof LiteralExpression) {
+            Object strToSearchValue = ((LiteralExpression) strToSearchExpression).getValue();
+            if (strToSearchValue != null) {
+                this.strToSearch = strToSearchValue.toString();
+            }
+        }
+    }
+        
+    
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression child = getChildren().get(0);
+        
+        if (!child.evaluate(tuple, ptr)) {
+            return false;
+        }
+        
+        if (ptr.getLength() == 0) {
+            ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+            return true;
+        }
+        
+        int position;
+        // An empty search string arrives as a null literal; treat it as matching at position 0
+        if (strToSearch == null){
+            position = 0;
+            ptr.set(PInteger.INSTANCE.toBytes(position));
+            return true;
+        }
+        
+        String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(0).getSortOrder());
+
+        position = sourceStr.indexOf(strToSearch);
+        ptr.set(PInteger.INSTANCE.toBytes(position));
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+    
+    @Override
+    public void readFields(DataInput input) throws IOException {
+        super.readFields(input);
+        init();
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f942b1f/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
new file mode 100644
index 0000000..603ad39
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import static org.junit.Assert.assertTrue;
+
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.junit.Test;
+
+public class InstrFunctionTest {
+    
+    public static void inputExpression(String value, PDataType dataType, String strToSearch,Integer expected, SortOrder order) throws SQLException{
+        Expression inputArg = LiteralExpression.newConstant(value,dataType,order);
+        
+        Expression strToSearchExp = LiteralExpression.newConstant(strToSearch,dataType);
+        List<Expression> expressions = Arrays.<Expression>asList(inputArg,strToSearchExp);
+        Expression instrFunction = new InstrFunction(expressions);
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        instrFunction.evaluate(null,ptr);
+        Integer result = (Integer) instrFunction.getDataType().toObject(ptr);
+        assertTrue(result.compareTo(expected) == 0);
+        
+    }
+    
+    
+    @Test
+    public void testInstrFunction() throws SQLException {
+        inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 5, SortOrder.ASC);
+        
+        inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 5, SortOrder.DESC);
+        
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 5, SortOrder.ASC);
+        
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 5, SortOrder.DESC);
+        
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", -1, SortOrder.DESC);
+        
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", -1, SortOrder.ASC);
+        
+        inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 5, SortOrder.ASC);
+        
+        inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 5, SortOrder.DESC);
+        
+        inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 8, SortOrder.ASC);
+        
+        inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 8, SortOrder.DESC);
+        
+        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 5, SortOrder.ASC);
+        
+        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 5, SortOrder.DESC);
+        
+        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, "", 0, SortOrder.ASC);
+        
+        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, "", 0, SortOrder.DESC);
+        
+        inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 0, SortOrder.ASC);
+        
+        inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 0, SortOrder.DESC);
+        
+        inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 2, SortOrder.ASC);
+        
+        inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 2, SortOrder.DESC);
+        
+        inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 4, SortOrder.ASC);
+        
+        inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 4, SortOrder.DESC);
+        
+        //Tests for MultiByte Characters
+        
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 1, SortOrder.ASC);
+        
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 1, SortOrder.DESC);
+        
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 2, SortOrder.ASC);
+        
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 2, SortOrder.DESC);
+        
+        inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 2, SortOrder.ASC);
+        
+        inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 2, SortOrder.DESC);
+    }
+    
+
+}


[11/50] [abbrv] phoenix git commit: PHOENIX-1795 Set handlerCount, numQueues and maxQueueLength of index and metadata queues correctly

Posted by ma...@apache.org.
PHOENIX-1795 Set handlerCount, numQueues and maxQueueLength of index and metadata queues correctly


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e2cf44c3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e2cf44c3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e2cf44c3

Branch: refs/heads/calcite
Commit: e2cf44c3c22f8789c4bd1fe529f07f2d6e45e482
Parents: d05d7c8
Author: Thomas <td...@salesforce.com>
Authored: Mon Mar 30 15:21:44 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Tue Mar 31 13:34:17 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/QueryDatabaseMetaDataIT.java   |  1 +
 .../org/apache/phoenix/rpc/PhoenixClientRpcIT.java | 17 ++++-------------
 .../org/apache/phoenix/rpc/PhoenixServerRpcIT.java | 15 ++++++---------
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java      | 16 +++++++++++-----
 .../org/apache/phoenix/query/QueryServices.java    |  4 ++++
 .../apache/phoenix/query/QueryServicesOptions.java | 10 ++++++++--
 .../org/apache/phoenix/jdbc/PhoenixTestDriver.java |  5 ++---
 .../java/org/apache/phoenix/query/BaseTest.java    | 10 ++++++++++
 8 files changed, 46 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
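
The sizing logic this patch lands on (visible in the PhoenixRpcScheduler hunk at the end of this diff) derives each queue's capacity from its handler count instead of a flat constant. In spirit, assuming HBase's per-handler call queue default of 10 and an index handler count of 30 (both values are assumptions here, taken from the surrounding code of this era):

    public class QueueSizingSketch {
        public static void main(String[] args) {
            int indexHandlerCount = 30;   // assumed default for QueryServices.INDEX_HANDLER_COUNT_ATTRIB
            int callsPerHandler = 10;     // assumed HBase DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER
            int maxIndexQueueLength = indexHandlerCount * callsPerHandler;
            System.out.println("index queue capacity = " + maxIndexQueueLength); // 300
        }
    }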


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index 44086d7..c9ec0ce 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -682,6 +682,7 @@ public class QueryDatabaseMetaDataIT extends BaseClientManagedTimeIT {
             descriptor.addFamily(columnDescriptor);
         }
         admin.createTable(descriptor);
+        admin.close();
             
         long ts = nextTimestamp();
         Properties props = new Properties();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
index deb14db..0c61b55 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
@@ -17,13 +17,11 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.util.Collections;
 import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.ipc.CallRunner;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -35,8 +33,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.collect.Maps;
-
 public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
 
     private static final String SCHEMA_NAME = "S";
@@ -45,15 +41,10 @@ public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
 
     @BeforeClass
     public static void doSetup() throws Exception {
-        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
-        serverProps.put(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-                TestPhoenixIndexRpcSchedulerFactory.class.getName());
-        serverProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ServerRpcControllerFactory.class.getName());
-        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
-        clientProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ClientRpcControllerFactory.class.getName());
+        Map<String, String> serverProps = Collections.singletonMap(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
         NUM_SLAVES_BASE = 2;
-        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet()
-                .iterator()));
+        setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), ReadOnlyProps.EMPTY_PROPS);
     }
     
     @AfterClass

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index b04f636..dbcd7ac 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.ipc.CallRunner;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -54,8 +54,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.collect.Maps;
-
 public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
 
     private static final String SCHEMA_NAME = "S";
@@ -65,12 +63,11 @@ public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
     
     @BeforeClass
     public static void doSetup() throws Exception {
-        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
-        serverProps.put(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-                TestPhoenixIndexRpcSchedulerFactory.class.getName());
-        serverProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ServerRpcControllerFactory.class.getName());
-        Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
-        clientProps.put(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, RpcControllerFactory.class.getName());
+        Map<String, String> serverProps = Collections.singletonMap(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+                TestPhoenixIndexRpcSchedulerFactory.class.getName());
+        // use the standard rpc controller for client rpcs so that we can isolate server rpcs and ensure they use the correct queue
+        Map<String, String> clientProps = Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+                RpcControllerFactory.class.getName());
         NUM_SLAVES_BASE = 2;
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index e721271..362e2cc 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.ipc;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -43,15 +45,19 @@ public class PhoenixRpcScheduler extends RpcScheduler {
 
     public PhoenixRpcScheduler(Configuration conf, RpcScheduler delegate, int indexPriority, int metadataPriority) {
         // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4
-        int maxQueueLength =  conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
+        int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT);
+        int metadataHandlerCount = conf.getInt(QueryServices.METADATA_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_METADATA_HANDLER_COUNT);
+        int maxIndexQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, indexHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
+        int maxMetadataQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, metadataHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
         float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
-        int numQueues = Math.max(1, Math.round(callQueuesHandlersFactor));
+        int numIndexQueues = Math.max(1, Math.round(indexHandlerCount * callQueuesHandlersFactor));
+        int numMetadataQueues = Math.max(1, Math.round(metadataHandlerCount * callQueuesHandlersFactor));
 
         this.indexPriority = indexPriority;
         this.metadataPriority = metadataPriority;
         this.delegate = delegate;
-        this.indexCallExecutor = new BalancedQueueRpcExecutor("Index", 1, numQueues, maxQueueLength);
-        this.metadataCallExecutor = new BalancedQueueRpcExecutor("Metadata", 1, numQueues, maxQueueLength);
+        this.indexCallExecutor = new BalancedQueueRpcExecutor("Index", indexHandlerCount, numIndexQueues, maxIndexQueueLength);
+        this.metadataCallExecutor = new BalancedQueueRpcExecutor("Metadata", metadataHandlerCount, numMetadataQueues, maxMetadataQueueLength);
     }
 
     @Override
@@ -120,4 +126,4 @@ public class PhoenixRpcScheduler extends RpcScheduler {
     }
     
     
-}
\ No newline at end of file
+}

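A minimal sketch (not Phoenix code) of the queue-sizing arithmetic above: each
executor gets max(1, round(handlerCount * factor)) balanced queues, and a queue
length defaulting to handlerCount times HBase's per-handler call-queue length.
The string keys and the per-handler default of 10 are assumptions standing in
for CALLQUEUE_LENGTH_CONF_KEY, CALL_QUEUE_HANDLER_FACTOR_CONF_KEY and
DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QueueSizingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 30 mirrors DEFAULT_INDEX_HANDLER_COUNT introduced by this patch
            int handlerCount = conf.getInt("phoenix.rpc.index.handler.count", 30);
            // assumed key and 10-per-handler default, see note above
            int maxQueueLength = conf.getInt("hbase.ipc.server.max.callqueue.length", handlerCount * 10);
            float handlerFactor = conf.getFloat("hbase.ipc.server.callqueue.handler.factor", 0f);
            int numQueues = Math.max(1, Math.round(handlerCount * handlerFactor));
            // with the defaults above: 1 queue holding up to 300 calls
            System.out.println(numQueues + " queue(s) of length " + maxQueueLength);
        }
    }
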
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 65f6acf..7a911e7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -152,6 +152,10 @@ public interface QueryServices extends SQLCloseable {
     public static final String DEFAULT_KEEP_DELETED_CELLS_ATTRIB = "phoenix.table.default.keep.deleted.cells";
     public static final String DEFAULT_STORE_NULLS_ATTRIB = "phoenix.table.default.store.nulls";
     public static final String METRICS_ENABLED = "phoenix.query.metrics.enabled";
+    
+    // rpc queue configs
+    public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.rpc.index.handler.count";
+    public static final String METADATA_HANDLER_COUNT_ATTRIB = "phoenix.rpc.metadata.handler.count";
 
     /**
      * Get executor service used for parallel scans

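The two new keys are plain HBase Configuration entries read on the region
server, so the handler pools can be sized programmatically or in
hbase-site.xml. A sketch with illustrative values (not tuning advice):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerCountSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("phoenix.rpc.index.handler.count", 40);    // INDEX_HANDLER_COUNT_ATTRIB
            conf.setInt("phoenix.rpc.metadata.handler.count", 10); // METADATA_HANDLER_COUNT_ATTRIB
        }
    }
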
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 97040d2..3561663 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -67,8 +67,10 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.trace.util.Tracing;
@@ -147,6 +149,8 @@ public class QueryServicesOptions {
     public static final int DEFAULT_INDEX_PRIORITY = 1000;
     public static final int DEFAULT_METADATA_PRIORITY = 2000;
     public static final boolean DEFAULT_ALLOW_LOCAL_INDEX = true;
+    public static final int DEFAULT_INDEX_HANDLER_COUNT = 30;
+    public static final int DEFAULT_METADATA_HANDLER_COUNT = 30;
 
     public static final int DEFAULT_TRACING_PAGE_SIZE = 100;
     /**
@@ -185,6 +189,8 @@ public class QueryServicesOptions {
     public static final boolean DEFAULT_AUTO_COMMIT = false;
     public static final boolean DEFAULT_IS_METRICS_ENABLED = true;
     
+    private static final String DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY = ClientRpcControllerFactory.class.getName();
+    
     private final Configuration config;
 
     private QueryServicesOptions(Configuration config) {
@@ -237,7 +243,7 @@ public class QueryServicesOptions {
             .setIfUnset(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK)
             .setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK)
             .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED)
-            .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, ClientRpcControllerFactory.class.getName());
+            .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY)
             ;
         // HBase sets this to 1, so we reset it to something more appropriate.
         // Hopefully HBase will change this, because we can't know if a user set
@@ -508,8 +514,8 @@ public class QueryServicesOptions {
     public QueryServicesOptions setDelayInMillisForSchemaChangeCheck(long delayInMillis) {
         config.setLong(DELAY_FOR_SCHEMA_UPDATE_CHECK, delayInMillis);
         return this;
-    }
     
+    }
     public QueryServicesOptions setMetricsEnabled(boolean flag) {
         config.setBoolean(METRICS_ENABLED, flag);
         return this;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java
index 0d3c461..d4956ee 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java
@@ -55,14 +55,13 @@ public class PhoenixTestDriver extends PhoenixEmbeddedDriver {
     private boolean closed = false;
 
     public PhoenixTestDriver() {
-        this.overrideProps = ReadOnlyProps.EMPTY_PROPS;
-        queryServices = new QueryServicesTestImpl(getDefaultProps());
+        this(ReadOnlyProps.EMPTY_PROPS);
     }
 
     // For tests to override the default configuration
     public PhoenixTestDriver(ReadOnlyProps props) {
         overrideProps = props;
-        queryServices = new QueryServicesTestImpl(getDefaultProps(),overrideProps);
+        queryServices = new QueryServicesTestImpl(getDefaultProps(), overrideProps);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2cf44c3/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 748ad19..e5884c3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -118,7 +118,12 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.LocalIndexMerger;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseClientManagedTimeIT;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
@@ -458,6 +463,8 @@ public abstract class BaseTest {
     
     private static final String ORG_ID = "00D300000000XHP";
     protected static int NUM_SLAVES_BASE = 1;
+    private static final String DEFAULT_SERVER_RPC_CONTROLLER_FACTORY = ServerRpcControllerFactory.class.getName();
+    private static final String DEFAULT_RPC_SCHEDULER_FACTORY = PhoenixRpcSchedulerFactory.class.getName();
     
     protected static String getZKClientPort(Configuration conf) {
         return conf.get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
@@ -613,6 +620,9 @@ public abstract class BaseTest {
         }
         //no point doing sanity checks when running tests.
         conf.setBoolean("hbase.table.sanity.checks", false);
+        // set the server rpc controller and rpc scheduler factory, used to configure the cluster
+        conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_SERVER_RPC_CONTROLLER_FACTORY);
+        conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, DEFAULT_RPC_SCHEDULER_FACTORY);
         
         // override any defaults based on overrideProps
         for (Entry<String,String> entry : overrideProps) {


[40/50] [abbrv] phoenix git commit: PHOENIX-1705 implement ARRAY_APPEND built-in function (Dumindu Buddhika)

Posted by ma...@apache.org.
PHOENIX-1705 implement ARRAY_APPEND built-in function (Dumindu Buddhika)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/986080f3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/986080f3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/986080f3

Branch: refs/heads/calcite
Commit: 986080f3fb939252c01b8c79d0bcdb602e0ddd64
Parents: 1b45110
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Apr 14 23:26:22 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Apr 14 23:26:22 2015 +0530

----------------------------------------------------------------------
 .../phoenix/expression/ExpressionType.java      |  4 +-
 .../phoenix/schema/types/PArrayDataType.java    | 85 ++++++++++++++++++++
 2 files changed, 88 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/986080f3/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index d562d6a..22778ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -21,6 +21,7 @@ import java.util.Map;
 
 import org.apache.phoenix.expression.function.ArrayAllComparisonExpression;
 import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
+import org.apache.phoenix.expression.function.ArrayAppendFunction;
 import org.apache.phoenix.expression.function.ArrayElemRefExpression;
 import org.apache.phoenix.expression.function.ArrayIndexFunction;
 import org.apache.phoenix.expression.function.ArrayLengthFunction;
@@ -211,7 +212,8 @@ public enum ExpressionType {
     NowFunction(NowFunction.class),
     InstrFunction(InstrFunction.class),
     MinuteFunction(MinuteFunction.class),
-    DayOfMonthFunction(DayOfMonthFunction.class)
+    DayOfMonthFunction(DayOfMonthFunction.class),
+    ArrayAppendFunction(ArrayAppendFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/986080f3/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index c183b7a..b6dce34 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -456,6 +456,91 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
         return null;
     }
 
+    public static boolean appendItemToArray(ImmutableBytesWritable ptr, int length, int offset, byte[] arrayBytes, PDataType baseType, int arrayLength, Integer maxLength, SortOrder sortOrder) {
+        int elementLength = maxLength == null ? ptr.getLength() : maxLength;
+
+        //padding
+        if (elementLength > ptr.getLength()) {
+            baseType.pad(ptr, elementLength, sortOrder);
+        }
+
+        int elementOffset = ptr.getOffset();
+        byte[] elementBytes = ptr.get();
+
+        byte[] newArray;
+        if (!baseType.isFixedWidth()) {
+
+            int offsetArrayPosition = Bytes.toInt(arrayBytes, offset + length - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT);
+            int offsetArrayLength = length - offsetArrayPosition - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE;
+
+            //checks whether offset array consists of shorts or integers
+            boolean useInt = offsetArrayLength / Math.abs(arrayLength) == Bytes.SIZEOF_INT;
+            boolean convertToInt = false;
+
+            int newElementPosition = offsetArrayPosition - 2 * Bytes.SIZEOF_BYTE;
+
+            if (!useInt) {
+                if (PArrayDataType.useShortForOffsetArray(newElementPosition)) {
+                    newArray = new byte[length + elementLength + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BYTE];
+                } else {
+                    newArray = new byte[length + elementLength + arrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE];
+                    convertToInt = true;
+                }
+            } else {
+                newArray = new byte[length + elementLength + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE];
+            }
+
+            int newOffsetArrayPosition = newElementPosition + elementLength + 3 * Bytes.SIZEOF_BYTE;
+
+            System.arraycopy(arrayBytes, offset, newArray, 0, newElementPosition);
+            System.arraycopy(elementBytes, elementOffset, newArray, newElementPosition, elementLength);
+
+            arrayLength = (Math.abs(arrayLength) + 1) * (int) Math.signum(arrayLength);
+            if (useInt) {
+                System.arraycopy(arrayBytes, offset + offsetArrayPosition, newArray, newOffsetArrayPosition, offsetArrayLength);
+                Bytes.putInt(newArray, newOffsetArrayPosition + offsetArrayLength, newElementPosition);
+
+                writeEndBytes(newArray, newOffsetArrayPosition, offsetArrayLength, arrayLength, arrayBytes[offset + length - 1], true);
+            } else {
+                if (!convertToInt) {
+                    System.arraycopy(arrayBytes, offset + offsetArrayPosition, newArray, newOffsetArrayPosition, offsetArrayLength);
+                    Bytes.putShort(newArray, newOffsetArrayPosition + offsetArrayLength, (short) (newElementPosition - Short.MAX_VALUE));
+
+                    writeEndBytes(newArray, newOffsetArrayPosition, offsetArrayLength, arrayLength, arrayBytes[offset + length - 1], false);
+                } else {
+                    int off = newOffsetArrayPosition;
+                    for (int arrayIndex = 0; arrayIndex < Math.abs(arrayLength) - 1; arrayIndex++) {
+                        Bytes.putInt(newArray, off, getOffset(arrayBytes, arrayIndex, true, offsetArrayPosition));
+                        off += Bytes.SIZEOF_INT;
+                    }
+
+                    Bytes.putInt(newArray, off, newElementPosition);
+                    Bytes.putInt(newArray, off + Bytes.SIZEOF_INT, newOffsetArrayPosition);
+                    Bytes.putInt(newArray, off + 2 * Bytes.SIZEOF_INT, -arrayLength);
+                    Bytes.putByte(newArray, off + 3 * Bytes.SIZEOF_INT, arrayBytes[offset + length - 1]);
+
+                }
+            }
+        } else {
+            newArray = new byte[length + elementLength];
+
+            System.arraycopy(arrayBytes, offset, newArray, 0, length);
+            System.arraycopy(elementBytes, elementOffset, newArray, length, elementLength);
+        }
+
+        ptr.set(newArray);
+
+        return true;
+    }
+
+    private static void writeEndBytes(byte[] array, int newOffsetArrayPosition, int offsetArrayLength, int arrayLength, byte header, boolean useInt) {
+        int byteSize = useInt ? Bytes.SIZEOF_INT : Bytes.SIZEOF_SHORT;
+
+        Bytes.putInt(array, newOffsetArrayPosition + offsetArrayLength + byteSize, newOffsetArrayPosition);
+        Bytes.putInt(array, newOffsetArrayPosition + offsetArrayLength + byteSize + Bytes.SIZEOF_INT, arrayLength);
+        Bytes.putByte(array, newOffsetArrayPosition + offsetArrayLength + byteSize + 2 * Bytes.SIZEOF_INT, header);
+    }
+
     public static int serailizeOffsetArrayIntoStream(DataOutputStream oStream, TrustedByteArrayOutputStream byteStream,
             int noOfElements, int maxOffset, int[] offsetPos) throws IOException {
         int offsetPosition = (byteStream.size());

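A hedged end-to-end sketch of what this series enables; the
ARRAY_APPEND(array, element) call shape is taken from PHOENIX-1705, and the
table, column and values are invented for illustration:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ArrayAppendSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS regions (id INTEGER PRIMARY KEY, zips VARCHAR ARRAY)");
                stmt.execute("UPSERT INTO regions VALUES (1, ARRAY['94105', '94538'])");
                conn.commit();
                // appendItemToArray above performs the byte-level append behind this expression
                ResultSet rs = stmt.executeQuery("SELECT ARRAY_APPEND(zips, '95050') FROM regions WHERE id = 1");
                while (rs.next()) {
                    System.out.println(rs.getArray(1));
                }
            }
        }
    }
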

[47/50] [abbrv] phoenix git commit: PHOENIX-1814 Exponential notation parsing and tests

Posted by ma...@apache.org.
PHOENIX-1814 Exponential notation parsing and tests


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1474231
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1474231
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1474231

Branch: refs/heads/calcite
Commit: d147423137af487c0582234c12cc00b0e8f6be98
Parents: 007361b
Author: Brian <be...@salesforce.com>
Authored: Tue Apr 14 13:49:24 2015 -0700
Committer: Thomas D'Silva <tw...@gmail.com>
Committed: Wed Apr 15 11:09:12 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ArithmeticQueryIT.java      | 60 ++++++++++++++++++++
 phoenix-core/src/main/antlr3/PhoenixSQL.g       | 16 +++++-
 2 files changed, 74 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1474231/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
index 72eb016..f56c965 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
@@ -985,4 +985,64 @@ public class ArithmeticQueryIT extends BaseHBaseManagedTimeIT {
         assertTrue(rs.next());
         assertEquals(-1.0f, rs.getFloat(1), 0.001);
     }
+    
+    @Test
+    public void testSystemTableHasDoubleForExponentialNumber() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE test (id VARCHAR not null primary key, num FLOAT)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO test(id,num) VALUES ('testid', 1.2E3)";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT 1.2E3 FROM SYSTEM.CATALOG LIMIT 1");
+        assertTrue(rs.next());
+        assertTrue(rs.getObject(1) instanceof Double);
+    }
+    
+    @Test
+    public void testFloatingPointWithExponentialNotation() throws Exception {
+        Float[] expected = {1.5E7f, 1.5E-7f, -1.5E-7f, 12E-5f, -.12E+34f};
+        String[] values = {"1.5e7", "1.5e-7", "-1.5e-7", "12E-5", "-.12E+34"};
+        ResultSet rs = createTableWithValues(values, "FLOAT");
+        for (int i = 0; i < expected.length; i++) {
+            assertEquals(expected[i], rs.getFloat(i + 1), 0.001);
+        }
+    }
+    
+    @Test
+    public void testDoubleWithExponentialNotation() throws Exception {
+        Double[] expected = {1.5E7d, 1.5E-7d, -1.5E-7d, 12E-5d, -.654E-321d, .1234E+56d};
+        String[] values = {"1.5e7", "1.5e-7", "-1.5e-7", "12E-5", "-.654E-321", ".1234E+56"};
+        ResultSet rs = createTableWithValues(values, "DOUBLE");
+        for (int i = 0; i < expected.length; i++) {
+            assertEquals(expected[i], rs.getDouble(i + 1), 0.001);
+        }
+    }
+    
+    private ResultSet createTableWithValues(String[] values, String valueType) throws SQLException {
+        Connection conn = DriverManager.getConnection(getUrl());
+        StringBuilder ddl = new StringBuilder("CREATE TABLE test (id VARCHAR not null primary key");
+        StringBuilder dmll = new StringBuilder("UPSERT INTO test(id,");
+        StringBuilder dmlr = new StringBuilder(") VALUES ('testid'");
+        StringBuilder select = new StringBuilder("SELECT");
+        for (int i = 0; i < values.length; i++) {
+            ddl.append(", num").append(i).append(" ").append(valueType);
+            dmll.append("num").append(i).append(",");
+            dmlr.append(", ").append(values[i]);
+            select.append(" num").append(i).append(",");
+        }
+        ddl.append(")");
+        dmlr.append(")");
+        dmll.deleteCharAt(dmll.length() - 1);
+        select.deleteCharAt(select.length() - 1);
+        select.append(" FROM test");
+        conn.createStatement().execute(ddl.toString());
+        conn.createStatement().execute(dmll.toString() + dmlr.toString());
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery(select.toString());
+        rs.next();
+        return rs;
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1474231/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 9f60424..f57c5cc 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -903,6 +903,9 @@ literal returns [LiteralParseNode ret]
     |   d=DECIMAL  {
             ret = factory.realNumber(d.getText());
         }
+    |   dbl=DOUBLE  {
+            ret = factory.literal(Double.valueOf(dbl.getText()));
+        }    
     |   NULL {ret = factory.literal(null);}
     |   TRUE {ret = factory.literal(Boolean.TRUE);} 
     |   FALSE {ret = factory.literal(Boolean.FALSE);}
@@ -967,9 +970,18 @@ NUMBER
     :   POSINTEGER
     ;
 
-// Exponential format is not supported.
 DECIMAL
-    :   POSINTEGER? '.' POSINTEGER
+    :   POSINTEGER? '.' POSINTEGER
+    ;
+
+DOUBLE
+    :   '.' POSINTEGER Exponent
+    |   POSINTEGER '.' Exponent
+    |   POSINTEGER ('.' (POSINTEGER (Exponent)?)? | Exponent)
+    ;
+
+Exponent
+    :    ('e' | 'E') ( PLUS | MINUS )? POSINTEGER
     ;
 
 DOUBLE_QUOTE

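With these rules, "15" still lexes as NUMBER and "1.5" as DECIMAL, while a
literal carrying an exponent ("1.5e7", "12E-5", ".1234E+56") now lexes as
DOUBLE and is materialized through Double.valueOf, as the literal rule above
shows. A plain-JDK sanity sketch of that conversion:

    public class ExponentLiteralSketch {
        public static void main(String[] args) {
            // mirrors factory.literal(Double.valueOf(dbl.getText())) in the grammar action
            System.out.println(Double.valueOf("1.5e7"));     // 1.5E7
            System.out.println(Double.valueOf("12E-5"));     // 1.2E-4
            System.out.println(Double.valueOf(".1234E+56")); // 1.234E55
        }
    }
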

[13/50] [abbrv] phoenix git commit: PHOENIX-1781 Add Now() (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1781 Add Now() (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/13d6296f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/13d6296f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/13d6296f

Branch: refs/heads/calcite
Commit: 13d6296f7cab70e45a5fa9e579f81b2fa0dc03fd
Parents: 0eca5f1
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Wed Apr 1 22:36:57 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Wed Apr 1 22:36:57 2015 +0530

----------------------------------------------------------------------
 .../end2end/YearMonthSecondFunctionIT.java      | 30 ++++++++++--
 .../phoenix/expression/ExpressionType.java      |  6 ++-
 .../expression/function/NowFunction.java        | 48 ++++++++++++++++++++
 3 files changed, 79 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/13d6296f/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
index 3742a17..20a88c0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
@@ -15,7 +15,9 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
+import java.sql.Date;
 import java.sql.DriverManager;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -58,6 +60,8 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
     public void testYearFunctionDate() throws SQLException {
 
         assertEquals(2015, callYearFunction("YEAR(current_date())"));
+        
+        assertEquals(2015, callYearFunction("YEAR(now())"));
 
         assertEquals(2008, callYearFunction("YEAR(TO_DATE('2008-01-01', 'yyyy-MM-dd', 'local'))"));
 
@@ -174,18 +178,18 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         String ddl =
                 "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
         conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-02-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'))";
         conn.createStatement().execute(dml);
         conn.commit();
 
         ResultSet rs = conn.createStatement().executeQuery("SELECT k1, WEEK(dates), WEEK(times) FROM T1 where WEEK(timestamps)=15");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
-        assertEquals(9, rs.getInt(2));
+        assertEquals(5, rs.getInt(2));
         assertEquals(20, rs.getInt(3));
         assertFalse(rs.next());
     }
-    
+
     @Test
     public void testHourFuncAgainstColumns() throws Exception {
         String ddl =
@@ -204,4 +208,24 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         assertEquals(20, rs.getInt(4));
         assertFalse(rs.next());
     }
+
+    @Test
+    public void testNowFunction() throws Exception {
+        Date date = new Date(System.currentTimeMillis());
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, timestamps TIMESTAMP CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (?, ?)";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.setInt(1, 1);
+        stmt.setDate(2, new Date(date.getTime()-500));
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT * from T1 where now() > timestamps");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(new Date(date.getTime()-500), rs.getDate(2));
+        assertFalse(rs.next());
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13d6296f/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 8a2f127..a7f8b4f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -52,6 +52,7 @@ import org.apache.phoenix.expression.function.MD5Function;
 import org.apache.phoenix.expression.function.MaxAggregateFunction;
 import org.apache.phoenix.expression.function.MinAggregateFunction;
 import org.apache.phoenix.expression.function.MonthFunction;
+import org.apache.phoenix.expression.function.NowFunction;
 import org.apache.phoenix.expression.function.NthValueFunction;
 import org.apache.phoenix.expression.function.PercentRankAggregateFunction;
 import org.apache.phoenix.expression.function.PercentileContAggregateFunction;
@@ -92,7 +93,7 @@ import com.google.common.collect.Maps;
 
 /**
  *
- * Enumeration of all Expression types that may be evaluated on the server-side.
+ * Enumeration of all Expression types that will be looked up. They may be evaluated on the server-side.
  * Used during serialization and deserialization to pass Expression between client
  * and server.
  *
@@ -203,7 +204,8 @@ public enum ExpressionType {
     MonthFunction(MonthFunction.class),
     SecondFunction(SecondFunction.class),
     WeekFunction(WeekFunction.class),
-    HourFunction(HourFunction.class)
+    HourFunction(HourFunction.class),
+    NowFunction(NowFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/13d6296f/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NowFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NowFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NowFunction.java
new file mode 100644
index 0000000..dc90249
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/NowFunction.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.CurrentDateParseNode;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+
+/**
+ * 
+ * Function used to represent NOW()
+ * The function returns a {@link org.apache.phoenix.schema.types.PTimestamp}
+ *
+ */
+@BuiltInFunction(name = NowFunction.NAME,
+nodeClass=CurrentDateParseNode.class, args= {})
+public abstract class NowFunction extends ScalarFunction {
+    
+    public static final String NAME = "NOW";
+    
+    public NowFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }   
+    
+}

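Since NowFunction is declared with nodeClass=CurrentDateParseNode and an empty
argument list, NOW() appears to be parsed exactly like CURRENT_DATE(). A
minimal usage sketch, assuming a local Phoenix instance at
jdbc:phoenix:localhost:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class NowFunctionSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement();
                    ResultSet rs = stmt.executeQuery("SELECT NOW() FROM SYSTEM.CATALOG LIMIT 1")) {
                while (rs.next()) {
                    System.out.println(rs.getTimestamp(1));
                }
            }
        }
    }
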

[20/50] [abbrv] phoenix git commit: PHOENIX-1683 Support HBase HA Query(timeline-consistent region replica read) (Rajeshbabu Chintaguntla)

Posted by ma...@apache.org.
PHOENIX-1683 Support HBase HA Query(timeline-consistent region replica read) (Rajeshbabu Chintaguntla)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/742ca13d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/742ca13d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/742ca13d

Branch: refs/heads/calcite
Commit: 742ca13d356c13a0055bd63299940219e14827fb
Parents: 3a0ce7d
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Fri Apr 3 14:12:25 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Fri Apr 3 14:12:25 2015 +0530

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterSessionIT.java  | 92 ++++++++++++++++++++
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  8 ++
 .../apache/phoenix/execute/BaseQueryPlan.java   |  6 ++
 .../apache/phoenix/iterate/ExplainTable.java    |  7 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  | 15 +++-
 .../apache/phoenix/jdbc/PhoenixStatement.java   | 56 ++++++++++++
 .../phoenix/parse/AlterSessionStatement.java    | 38 ++++++++
 .../apache/phoenix/parse/ParseNodeFactory.java  |  4 +
 .../org/apache/phoenix/query/QueryServices.java |  2 +
 .../phoenix/query/QueryServicesOptions.java     |  3 +
 .../java/org/apache/phoenix/util/JDBCUtil.java  | 42 +++++++--
 .../org/apache/phoenix/util/PhoenixRuntime.java |  5 ++
 .../org/apache/phoenix/util/JDBCUtilTest.java   | 15 ++++
 13 files changed, 284 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterSessionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterSessionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterSessionIT.java
new file mode 100644
index 0000000..d97d6d4
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterSessionIT.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.Properties;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ *
+ * Basic tests for Alter Session Statements
+ *
+ */
+public class AlterSessionIT extends BaseHBaseManagedTimeIT {
+
+    Connection testConn;
+
+    @Before
+    public void initTable() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        testConn = DriverManager.getConnection(getUrl(), props);
+        assertEquals(Consistency.STRONG, ((PhoenixConnection)testConn).getConsistency());
+        testConn.createStatement().execute("create table AlterSessionIT (col1 varchar primary key)");
+        testConn.commit();
+    }
+
+    @Test
+    public void testUpdateConsistency() throws Exception {
+        try {
+            Statement st = testConn.createStatement();
+            st.execute("alter session set Consistency = 'timeline'");
+            ResultSet rs = st.executeQuery("explain select * from AlterSessionIT");
+            assertEquals(Consistency.TIMELINE, ((PhoenixConnection)testConn).getConsistency());
+            String queryPlan = QueryUtil.getExplainPlan(rs);
+            assertTrue(queryPlan.indexOf("TIMELINE") > 0);
+
+            // turn off timeline read consistency
+            st.execute("alter session set Consistency = 'strong'");
+            rs = st.executeQuery("explain select * from AlterSessionIT");
+            queryPlan = QueryUtil.getExplainPlan(rs);
+            assertTrue(queryPlan.indexOf("TIMELINE") < 0);
+        } finally {
+            this.testConn.close();
+        }
+    }
+
+    @Test
+    public void testSetConsistencyInURL() throws Exception {
+        try {
+            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+            Connection conn = DriverManager.getConnection(getUrl() + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR +
+                    "Consistency=TIMELINE", props);
+            assertEquals(Consistency.TIMELINE, ((PhoenixConnection)conn).getConsistency());
+            Statement st = conn.createStatement();
+            ResultSet rs = st.executeQuery("explain select * from AlterSessionIT");
+            String queryPlan = QueryUtil.getExplainPlan(rs);
+            assertTrue(queryPlan.indexOf("TIMELINE") > 0);
+            conn.close();
+        } finally {
+            this.testConn.close();
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 0330a39..61d5afa 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -70,6 +70,7 @@ tokens
     KEY='key';
     ALTER='alter';
     COLUMN='column';
+    SESSION='session';
     TABLE='table';
     ADD='add';
     SPLIT='split';
@@ -372,6 +373,7 @@ non_select_node returns [BindableStatement ret]
     |   s=alter_index_node
     |   s=alter_table_node
     |   s=trace_node
+    |   s=alter_session_node
     |	s=create_sequence_node
     |	s=drop_sequence_node
     |   s=update_statistics_node
@@ -512,6 +514,12 @@ trace_node returns [TraceStatement ret]
        {ret = factory.trace(Tracing.isTraceOn(flag.getText()), s == null ? Tracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());}
     ;
 
+// Parse an alter session statement.
+alter_session_node returns [AlterSessionStatement ret]
+    :   ALTER SESSION (SET p=properties)
+       {ret = factory.alterSession(p);}
+    ;
+
 // Parse an alter table statement.
 alter_table_node returns [AlterTableStatement ret]
     :   ALTER (TABLE | v=VIEW) t=from_table_name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 4ca2dee..9b2d05a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -178,6 +178,12 @@ public abstract class BaseQueryPlan implements QueryPlan {
         // is resolved.
         // TODO: include time range in explain plan?
         PhoenixConnection connection = context.getConnection();
+
+        // set read consistency
+        if (context.getCurrentTable() != null
+                && context.getCurrentTable().getTable().getType() != PTableType.SYSTEM) {
+            scan.setConsistency(connection.getConsistency());
+        }
         if (context.getScanTimeRange() == null) {
           Long scn = connection.getSCN();
           if (scn == null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 9756871..2fcc2fb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -23,6 +23,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.NoSuchElementException;
 
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -98,6 +99,11 @@ public abstract class ExplainTable {
         StringBuilder buf = new StringBuilder(prefix);
         ScanRanges scanRanges = context.getScanRanges();
         boolean hasSkipScanFilter = false;
+        Scan scan = context.getScan();
+
+        if (scan.getConsistency() != Consistency.STRONG){
+            buf.append("TIMELINE-CONSISTENCY ");
+        }
         if (hint.hasHint(Hint.SMALL)) {
             buf.append("SMALL ");
         }
@@ -115,7 +121,6 @@ public abstract class ExplainTable {
         }
         planSteps.add(buf.toString());
         
-        Scan scan = context.getScan();
         Filter filter = scan.getFilter();
         PageFilter pageFilter = null;
         if (filter != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 732dd8b..1277151 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -55,6 +55,7 @@ import java.util.concurrent.Executor;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -137,7 +138,8 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     private Sampler<?> sampler;
     private boolean readOnly = false;
     private Map<String, String> customTracingAnnotations = emptyMap(); 
- 
+    private Consistency consistency = Consistency.STRONG;
+
     static {
         Tracing.addTraceMetricsSource();
     }
@@ -205,6 +207,9 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
                 this.services.getProps().getBoolean(
                         QueryServices.AUTO_COMMIT_ATTRIB,
                         QueryServicesOptions.DEFAULT_AUTO_COMMIT));
+        this.consistency = JDBCUtil.getConsistencyLevel(url, this.info, this.services.getProps()
+                 .get(QueryServices.CONSISTENCY_ATTRIB,
+                         QueryServicesOptions.DEFAULT_CONSISTENCY_LEVEL));
         this.tenantId = tenantId;
         this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, this.services.getProps());
         datePattern = this.services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
@@ -509,6 +514,10 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         return isAutoCommit;
     }
 
+    public Consistency getConsistency() {
+        return this.consistency;
+    }
+
     @Override
     public String getCatalog() throws SQLException {
         return "";
@@ -647,6 +656,10 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         this.isAutoCommit = isAutoCommit;
     }
 
+    public void setConsistency(Consistency val) {
+        this.consistency = val;
+    }
+
     @Override
     public void setCatalog(String catalog) throws SQLException {
         throw new SQLFeatureNotSupportedException();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index f802ff4..ee6b016 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -36,6 +36,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.call.CallRunner;
@@ -73,6 +74,7 @@ import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.parse.AddColumnStatement;
 import org.apache.phoenix.parse.AliasedNode;
 import org.apache.phoenix.parse.AlterIndexStatement;
+import org.apache.phoenix.parse.AlterSessionStatement;
 import org.apache.phoenix.parse.BindableStatement;
 import org.apache.phoenix.parse.ColumnDef;
 import org.apache.phoenix.parse.ColumnName;
@@ -127,6 +129,7 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.PhoenixContextExecutor;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
@@ -716,6 +719,54 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         }
     }
 
+    private static class ExecutableAlterSessionStatement extends AlterSessionStatement implements CompilableStatement {
+
+        public ExecutableAlterSessionStatement(Map<String,Object> props) {
+            super(props);
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
+            final StatementContext context = new StatementContext(stmt);
+            return new MutationPlan() {
+
+                @Override
+                public StatementContext getContext() {
+                    return context;
+                }
+
+                @Override
+                public ParameterMetaData getParameterMetaData() {
+                    return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
+                }
+
+                @Override
+                public ExplainPlan getExplainPlan() throws SQLException {
+                    return new ExplainPlan(Collections.singletonList("ALTER SESSION"));
+                }
+
+                @Override
+                public PhoenixConnection getConnection() {
+                    return stmt.getConnection();
+                }
+
+                @Override
+                public MutationState execute() throws SQLException {
+                    Object consistency = getProps().get(PhoenixRuntime.CONSISTENCY_ATTRIB.toUpperCase());
+                    if(consistency != null) {
+                        if (((String)consistency).equalsIgnoreCase(Consistency.TIMELINE.toString())){
+                            getConnection().setConsistency(Consistency.TIMELINE);
+                        } else {
+                            getConnection().setConsistency(Consistency.STRONG);
+                        }
+                    }
+                    return new MutationState(0, context.getConnection());
+                }
+            };
+        }
+    }
+
     private static class ExecutableUpdateStatisticsStatement extends UpdateStatisticsStatement implements
             CompilableStatement {
         public ExecutableUpdateStatisticsStatement(NamedTableNode table, StatisticsCollectionScope scope, Map<String,Object> props) {
@@ -915,6 +966,11 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
         }
 
         @Override
+        public AlterSessionStatement alterSession(Map<String, Object> props) {
+            return new ExecutableAlterSessionStatement(props);
+        }
+
+        @Override
         public ExplainStatement explain(BindableStatement statement) {
             return new ExecutableExplainStatement(statement);
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java
new file mode 100644
index 0000000..5d944df
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.util.Map;
+
+public class AlterSessionStatement extends MutableStatement {
+
+    private final Map<String,Object> props;
+
+    public AlterSessionStatement(Map<String,Object> props) {
+        this.props = props;
+    }
+
+    @Override
+    public int getBindCount() {
+        return 0;
+    }
+
+    public Map<String, Object> getProps() {
+        return props;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index eb1768c..62db00a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -330,6 +330,10 @@ public class ParseNodeFactory {
         return new TraceStatement(isTraceOn, samplingRate);
     }
 
+    public AlterSessionStatement alterSession(Map<String,Object> props) {
+        return new AlterSessionStatement(props);
+    }
+
     public TableName table(String schemaName, String tableName) {
         return TableName.createNormalized(schemaName,tableName);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 7a911e7..adf146d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -48,6 +48,8 @@ public interface QueryServices extends SQLCloseable {
     public static final String HBASE_CLIENT_PRINCIPAL = "hbase.myclient.principal";
     public static final String SPOOL_DIRECTORY = "phoenix.spool.directory";
     public static final String AUTO_COMMIT_ATTRIB = "phoenix.connection.autoCommit";
+    // consistency configuration setting
+    public static final String CONSISTENCY_ATTRIB = "phoenix.connection.consistency";
 
     /**
 	 * max size to spool the result into

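Besides the URL suffix and ALTER SESSION paths exercised in AlterSessionIT,
JDBCUtil.findProperty consults the Properties handed to DriverManager first,
so the consistency level can also be set per connection there. A sketch; the
"Consistency" key assumes PhoenixRuntime.CONSISTENCY_ATTRIB resolves to that
string, matching the URL form used in the tests:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class TimelineConsistencySketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("Consistency", "TIMELINE"); // assumed key name, see note above
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // reads on non-SYSTEM tables may now be served by timeline-consistent replicas
            }
        }
    }
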
http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 3561663..884b820 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -67,6 +67,7 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory;
@@ -191,6 +192,8 @@ public class QueryServicesOptions {
     
     private static final String DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY = ClientRpcControllerFactory.class.getName();
     
+    public static final String DEFAULT_CONSISTENCY_LEVEL = Consistency.STRONG.toString();
+
     private final Configuration config;
 
     private QueryServicesOptions(Configuration config) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
index 06534d1..ddd9753 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
@@ -26,6 +26,7 @@ import java.util.Properties;
 
 import javax.annotation.Nullable;
 
+import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PName;
@@ -54,12 +55,15 @@ public class JDBCUtil {
      * @return the property value or null if not found
      */
     public static String findProperty(String url, Properties info, String propName) {
-        String urlPropName = ";" + propName + "=";
+        String urlPropName = PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + propName.toUpperCase() + "=";
+        String upperCaseURL = url.toUpperCase();
         String propValue = info.getProperty(propName);
         if (propValue == null) {
-            int begIndex = url.indexOf(urlPropName);
+            int begIndex = upperCaseURL.indexOf(urlPropName);
             if (begIndex >= 0) {
-                int endIndex = url.indexOf(';',begIndex + urlPropName.length());
+                int endIndex =
+                        upperCaseURL.indexOf(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR, begIndex
+                                + urlPropName.length());
                 if (endIndex < 0) {
                     endIndex = url.length();
                 }
@@ -70,10 +74,13 @@ public class JDBCUtil {
     }
 
     public static String removeProperty(String url, String propName) {
-        String urlPropName = ";" + propName + "=";
-        int begIndex = url.indexOf(urlPropName);
+        String urlPropName = PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + propName.toUpperCase() + "=";
+        String upperCaseURL = url.toUpperCase();
+        int begIndex = upperCaseURL.indexOf(urlPropName);
         if (begIndex >= 0) {
-            int endIndex = url.indexOf(';', begIndex + urlPropName.length());
+            int endIndex =
+                    upperCaseURL.indexOf(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR, begIndex
+                            + urlPropName.length());
             if (endIndex < 0) {
                 endIndex = url.length();
             }
@@ -93,7 +100,7 @@ public class JDBCUtil {
 		for (String propName : info.stringPropertyNames()) {
 			result.put(propName, info.getProperty(propName));
 		}
-		String[] urlPropNameValues = url.split(";");
+		String[] urlPropNameValues = url.split(Character.toString(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR));
 		if (urlPropNameValues.length > 1) {
 			for (int i = 1; i < urlPropNameValues.length; i++) {
 				String[] urlPropNameValue = urlPropNameValues[i].split("=");
@@ -154,4 +161,25 @@ public class JDBCUtil {
         }
         return Boolean.valueOf(autoCommit);
     }
+
+    /**
+     * Retrieve the value of the optional read consistency setting from the JDBC url or
+     * connection properties.
+     *
+     * @param url JDBC url used for connecting to Phoenix
+     * @param info connection properties
+     * @param defaultValue default to return if the Consistency property is not set in the
+     *                     url or connection properties
+     * @return Consistency.TIMELINE if the Consistency attribute was set to TIMELINE in the
+     * connection URL or properties, otherwise Consistency.STRONG
+     */
+    public static Consistency getConsistencyLevel(String url, Properties info, String defaultValue) {
+        String consistency = findProperty(url, info, PhoenixRuntime.CONSISTENCY_ATTRIB);
+
+        if (consistency != null && consistency.equalsIgnoreCase(Consistency.TIMELINE.toString())) {
+            return Consistency.TIMELINE;
+        }
+
+        return Consistency.STRONG;
+    }
 }
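
A minimal sketch of what the case-insensitive matching above buys, using only the methods changed in this file ("localhost" is a placeholder quorum):

    import java.util.Properties;

    import org.apache.hadoop.hbase.client.Consistency;
    import org.apache.phoenix.util.JDBCUtil;

    public class ConsistencyLookupSketch {
        public static void main(String[] args) {
            String def = Consistency.STRONG.toString();
            // Property name and value in the URL are matched case-insensitively
            System.out.println(JDBCUtil.getConsistencyLevel(
                    "localhost;consistency=timeline", new Properties(), def)); // TIMELINE
            // Without the attribute, reads stay at the STRONG default
            System.out.println(JDBCUtil.getConsistencyLevel(
                    "localhost", new Properties(), def)); // STRONG
        }
    }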

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index b030510..e5ead10 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -142,6 +142,11 @@ public class PhoenixRuntime {
     public static final String AUTO_COMMIT_ATTRIB = "AutoCommit";
 
     /**
+     * Use this connection property to explicitly set read consistency level on a new connection.
+     */
+    public static final String CONSISTENCY_ATTRIB = "Consistency";
+
+    /**
      * Use this as the zookeeper quorum name to have a connection-less connection. This enables
      * Phoenix-compatible HFiles to be created in a map/reduce job by creating tables,
      * upserting data into them, and getting the uncommitted state through {@link #getUncommittedData(Connection)}
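
The new attribute is used like AUTO_COMMIT_ATTRIB above; a minimal usage sketch mirroring the unit tests below ("localhost" is a placeholder quorum):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.util.PhoenixRuntime;

    public class TimelineConnectionSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Request TIMELINE (replica-servable, possibly stale) reads for this connection
            props.setProperty(PhoenixRuntime.CONSISTENCY_ATTRIB, "TIMELINE");
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // Queries issued on conn may now be served with timeline consistency
            }
        }
    }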

http://git-wip-us.apache.org/repos/asf/phoenix/blob/742ca13d/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
index 74b397f..fc29ad6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.client.Consistency;
 import org.junit.Test;
 
 public class JDBCUtilTest {
@@ -101,4 +102,18 @@ public class JDBCUtilTest {
         props.setProperty("AutoCommit", "false");
         assertFalse(JDBCUtil.getAutoCommit("localhost", props, false));
     }
+
+    @Test
+    public void testGetConsistency_TIMELINE_InUrl() {
+        assertTrue(JDBCUtil.getConsistencyLevel("localhost;Consistency=TIMELINE", new Properties(),
+                Consistency.STRONG.toString()) == Consistency.TIMELINE);
+    }
+
+    @Test
+    public void testGetConsistency_TIMELINE_InProperties() {
+        Properties props = new Properties();
+        props.setProperty(PhoenixRuntime.CONSISTENCY_ATTRIB, "TIMELINE");
+        assertTrue(JDBCUtil.getConsistencyLevel("localhost", props, Consistency.STRONG.toString())
+                == Consistency.TIMELINE);
+    }
 }


[35/50] [abbrv] phoenix git commit: PHOENIX-1826 Implement TrackOrderPreservingExpressionCompiler as Expression visitor instead of ParseNode visitor

Posted by ma...@apache.org.
PHOENIX-1826 Implement TrackOrderPreservingExpressionCompiler as Expression visitor instead of ParseNode visitor


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2f0b51cb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2f0b51cb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2f0b51cb

Branch: refs/heads/calcite
Commit: 2f0b51cbe8db817471d87d2521508ba6e42174e9
Parents: 795debf
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Apr 13 16:27:20 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Apr 13 16:27:20 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/end2end/DerivedTableIT.java  |   4 +-
 .../org/apache/phoenix/end2end/HashJoinIT.java  |   7 -
 .../org/apache/phoenix/end2end/OrderByIT.java   |  13 +-
 .../org/apache/phoenix/end2end/SubqueryIT.java  |   8 +-
 .../end2end/SubqueryUsingSortMergeJoinIT.java   |  20 +-
 .../phoenix/end2end/VariableLengthPKIT.java     |   2 +-
 .../index/GlobalIndexOptimizationIT.java        |  11 +-
 .../apache/phoenix/compile/GroupByCompiler.java |  56 ++--
 .../apache/phoenix/compile/OrderByCompiler.java |  46 ++--
 .../phoenix/compile/OrderPreservingTracker.java | 259 +++++++++++++++++++
 .../TrackOrderPreservingExpressionCompiler.java | 249 ------------------
 .../phoenix/compile/QueryCompilerTest.java      | 108 +++++++-
 .../phoenix/compile/QueryOptimizerTest.java     |   7 +-
 13 files changed, 441 insertions(+), 349 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
index 7443267..b7c4906 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
@@ -309,8 +309,8 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
             rs = conn.createStatement().executeQuery("EXPLAIN " + query);
             assertEquals(plans[0], QueryUtil.getExplainPlan(rs));
             
-            // distinct b (groupby b, a) groupby a
-            query = "SELECT DISTINCT COLLECTDISTINCT(t.b) FROM (SELECT b_string b, a_string a FROM aTable GROUP BY b_string, a_string) AS t GROUP BY t.a";
+            // distinct b (groupby a, b) groupby a
+            query = "SELECT DISTINCT COLLECTDISTINCT(t.b) FROM (SELECT b_string b, a_string a FROM aTable GROUP BY a_string, b_string) AS t GROUP BY t.a";
             statement = conn.prepareStatement(query);
             rs = statement.executeQuery();
             assertTrue (rs.next());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 1a2a1d0..a03204a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -118,7 +118,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.NAME]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME,
                 /* 
@@ -156,7 +155,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.NAME]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME,
                 /*
@@ -307,7 +305,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.NAME]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME,
                 /* 
@@ -495,7 +492,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [\"I.0:NAME\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY",
@@ -687,7 +683,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.NAME]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY",
@@ -876,7 +871,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [\"I.0:NAME\"]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [\"I.0:NAME\"]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "" + JOIN_ITEM_TABLE_DISPLAY_NAME +" [-32768]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
@@ -1085,7 +1079,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                 "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "    SERVER AGGREGATE INTO DISTINCT ROWS BY [I.NAME]\n" +
                 "CLIENT MERGE SORT\n" +
-                "CLIENT SORTED BY [I.NAME]\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX +""+JOIN_ITEM_TABLE_DISPLAY_NAME+" [-32768]\n"+
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
index 74eb7fe..9fc3003 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
@@ -30,7 +30,6 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -499,9 +498,15 @@ public class OrderByIT extends BaseClientManagedTimeIT {
             stmt.execute();
             conn.commit();
 
-            String query = "SELECT col1+col2, col4, TRUNC(col3, 'HOUR') FROM e_table ORDER BY 1, 2";
-            conn.createStatement().executeQuery(query);
-            fail();
+            String query = "SELECT col1+col2, col4, a_string FROM e_table ORDER BY 1, 2";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("a", rs.getString(3));
+            assertTrue(rs.next());
+            assertEquals("c", rs.getString(3));
+            assertTrue(rs.next());
+            assertEquals("b", rs.getString(3));
+            assertFalse(rs.next());
         } catch (SQLException e) {
         } finally {
             conn.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
index f655e0a..13354da 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
@@ -200,7 +200,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -209,7 +209,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -274,7 +274,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
@@ -283,7 +283,7 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
                 "    PARALLEL LEFT-JOIN TABLE 1\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
                 "            PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
index 59f75e5..cb9f4b1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryUsingSortMergeJoinIT.java
@@ -121,7 +121,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " ['000000000000001'] - [*]\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [\"item_id\"]\n" +
                 "CLIENT SORTED BY [I.NAME]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -132,7 +131,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "            SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"]\\\n" +
@@ -142,7 +140,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\".+.item_id\", .+.NAME\\]\n" +
                 "        SKIP-SCAN-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -156,7 +153,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"O.customer_id\"\\]\n" +
                 "        PARALLEL INNER-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "        PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
@@ -187,7 +183,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " ['000000000000001'] - [*]\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [\"item_id\"]\n" +
                 "CLIENT SORTED BY [\"I.0:NAME\"]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -197,9 +192,8 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -208,9 +202,8 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SCHEMA + ".idx_item\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -227,7 +220,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"O.customer_id\"\\]\n" +
                 "        PARALLEL INNER-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "        PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +
@@ -257,7 +249,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + " ['000000000000001'] - [*]\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY [\"item_id\"]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY [\"item_id\"]\n" +
                 "CLIENT SORTED BY [\"I.0:NAME\"]",
 
                 "SORT-MERGE-JOIN \\(LEFT\\) TABLES\n" +
@@ -267,9 +258,8 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "    AND\n" +
                 "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "            SERVER FILTER BY FIRST KEY ONLY\n" +
-                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "            SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        CLIENT MERGE SORT\n" +
-                "        CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "            PARALLEL ANTI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "                CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -278,9 +268,8 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "AND\n" +
                 "    CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + JOIN_ITEM_TABLE_DISPLAY_NAME + " \\[-32768\\]\n" +
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
-                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.0:NAME\", \".+.:item_id\"\\]\n" +
+                "        SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\".+.:item_id\", \".+.0:NAME\"\\]\n" +
                 "        PARALLEL SEMI-JOIN TABLE 0 \\(SKIP MERGE\\)\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "                SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"item_id\"\\]\n" +
@@ -298,7 +287,6 @@ public class SubqueryUsingSortMergeJoinIT extends BaseHBaseManagedTimeIT {
                 "        SERVER FILTER BY FIRST KEY ONLY\n" +
                 "        SERVER AGGREGATE INTO DISTINCT ROWS BY \\[\"O.customer_id\"\\]\n" +
                 "    CLIENT MERGE SORT\n" +
-                "    CLIENT SORTED BY \\[\"O.customer_id\"\\]\n" +
                 "        PARALLEL INNER-JOIN TABLE 0\n" +
                 "            CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" +
                 "        PARALLEL LEFT-JOIN TABLE 1\\(DELAYED EVALUATION\\)\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
index b7bc7cc..1e48f8c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
@@ -1090,7 +1090,7 @@ public class VariableLengthPKIT extends BaseClientManagedTimeIT {
     @Test
     public void testMultiFixedLengthNull() throws Exception {
         long ts = nextTimestamp();
-        String query = "SELECT B_INTEGER,C_INTEGER,COUNT(1) FROM BTABLE GROUP BY C_INTEGER,B_INTEGER";
+        String query = "SELECT B_INTEGER,C_INTEGER,COUNT(1) FROM BTABLE GROUP BY B_INTEGER,C_INTEGER";
         String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
index 07d87b7..b97176f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/GlobalIndexOptimizationIT.java
@@ -198,7 +198,7 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             
             expected = 
                     "CLIENT PARALLEL \\d-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME + "\n" +
-                            "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[T.T_ID, T.V1, T.K3\\]\n" +
+                            "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[T.V1, T.T_ID, T.K3\\]\n" +
                             "CLIENT MERGE SORT\n" +
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " \\[\\*\\] - \\['z'\\]\n" +
@@ -209,10 +209,6 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             
             rs = conn1.createStatement().executeQuery(query);
             assertTrue(rs.next());
-            assertEquals("b", rs.getString("t_id"));;
-            assertEquals(4, rs.getInt("k3"));
-            assertEquals("z", rs.getString("V1"));
-            assertTrue(rs.next());
             assertEquals("f", rs.getString("t_id"));
             assertEquals(3, rs.getInt("k3"));
             assertEquals("a", rs.getString("V1"));
@@ -224,6 +220,10 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
             assertEquals("q", rs.getString("t_id"));
             assertEquals(1, rs.getInt("k3"));
             assertEquals("c", rs.getString("V1"));
+            assertTrue(rs.next());
+            assertEquals("b", rs.getString("t_id"));;
+            assertEquals(4, rs.getInt("k3"));
+            assertEquals("z", rs.getString("V1"));
             assertFalse(rs.next());
             
             query = "SELECT /*+ INDEX(" + TestUtil.DEFAULT_DATA_TABLE_NAME + " " + TestUtil.DEFAULT_INDEX_TABLE_NAME + ")*/ v1,sum(k3) from " + TestUtil.DEFAULT_DATA_TABLE_FULL_NAME + " where v1 <='z'  group by v1 order by v1";
@@ -233,7 +233,6 @@ public class GlobalIndexOptimizationIT extends BaseHBaseManagedTimeIT {
                     "CLIENT PARALLEL \\d-WAY FULL SCAN OVER T\n" +
                             "    SERVER AGGREGATE INTO DISTINCT ROWS BY \\[T.V1\\]\n" +
                             "CLIENT MERGE SORT\n" +
-                            "CLIENT SORTED BY \\[T.V1\\]\n" +
                             "    SKIP-SCAN-JOIN TABLE 0\n" +
                             "        CLIENT PARALLEL 1-WAY RANGE SCAN OVER I \\[\\*\\] - \\['z'\\]\n" +
                             "            SERVER FILTER BY FIRST KEY ONLY\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
index 4f1ba5b..7d9df02 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
@@ -23,9 +23,9 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.http.annotation.Immutable;
-import org.apache.phoenix.compile.TrackOrderPreservingExpressionCompiler.Entry;
-import org.apache.phoenix.compile.TrackOrderPreservingExpressionCompiler.Ordering;
+import org.apache.phoenix.compile.OrderPreservingTracker.Ordering;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -158,29 +158,30 @@ public class GroupByCompiler {
         }
 
        // Accumulate expressions in GROUP BY
-        TrackOrderPreservingExpressionCompiler groupByVisitor =
-                new TrackOrderPreservingExpressionCompiler(context, 
-                        GroupBy.EMPTY_GROUP_BY, groupByNodes.size(), 
-                        Ordering.UNORDERED, tupleProjector);
-        for (ParseNode node : groupByNodes) {
-            Expression expression = node.accept(groupByVisitor);
-            if (groupByVisitor.isAggregate()) {
-                throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_GROUP_BY)
-                    .setMessage(expression.toString()).build().buildException();
-            }
+        ExpressionCompiler compiler =
+                new ExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY);
+        List<Pair<Integer,Expression>> groupBys = Lists.newArrayListWithExpectedSize(groupByNodes.size());
+        OrderPreservingTracker tracker = new OrderPreservingTracker(context, GroupBy.EMPTY_GROUP_BY, Ordering.UNORDERED, groupByNodes.size(), tupleProjector);
+        for (int i = 0; i < groupByNodes.size(); i++) {
+            ParseNode node = groupByNodes.get(i);
+            Expression expression = node.accept(compiler);
             if (!expression.isStateless()) {
-                groupByVisitor.addEntry(expression);
+                if (compiler.isAggregate()) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_GROUP_BY)
+                        .setMessage(expression.toString()).build().buildException();
+                }
+                tracker.track(expression);
+                groupBys.add(new Pair<Integer,Expression>(i,expression));
             }
-            groupByVisitor.reset();
+            compiler.reset();
         }
         
-        List<Entry> groupByEntries = groupByVisitor.getEntries();
-        if (groupByEntries.isEmpty()) {
+        if (groupBys.isEmpty()) {
             return GroupBy.EMPTY_GROUP_BY;
         }
         
-        boolean isRowKeyOrderedGrouping = isInRowKeyOrder && groupByVisitor.isOrderPreserving();
-        List<Expression> expressions = Lists.newArrayListWithCapacity(groupByEntries.size());
+        boolean isRowKeyOrderedGrouping = isInRowKeyOrder && tracker.isOrderPreserving();
+        List<Expression> expressions = Lists.newArrayListWithExpectedSize(groupBys.size());
         List<Expression> keyExpressions = expressions;
         String groupExprAttribName;
         // This is true if the GROUP BY is composed of only PK columns. We further check here that
@@ -188,8 +189,8 @@ public class GroupByCompiler {
         // column and use each subsequent one in PK order).
         if (isRowKeyOrderedGrouping) {
             groupExprAttribName = BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS;
-            for (Entry groupByEntry : groupByEntries) {
-                expressions.add(groupByEntry.getExpression());
+            for (Pair<Integer,Expression> groupBy : groupBys) {
+                expressions.add(groupBy.getSecond());
             }
         } else {
             /*
@@ -211,11 +212,11 @@ public class GroupByCompiler {
              * Within each bucket, order based on the column position in the schema. Putting the fixed width values
              * in the beginning optimizes access to subsequent values.
              */
-            Collections.sort(groupByEntries, new Comparator<Entry>() {
+            Collections.sort(groupBys, new Comparator<Pair<Integer,Expression>>() {
                 @Override
-                public int compare(Entry o1, Entry o2) {
-                    Expression e1 = o1.getExpression();
-                    Expression e2 = o2.getExpression();
+                public int compare(Pair<Integer,Expression> gb1, Pair<Integer,Expression> gb2) {
+                    Expression e1 = gb1.getSecond();
+                    Expression e2 = gb2.getSecond();
                     boolean isFixed1 = e1.getDataType().isFixedWidth();
                     boolean isFixed2 = e2.getDataType().isFixedWidth();
                     boolean isFixedNullable1 = e1.isNullable() &&isFixed1;
@@ -224,7 +225,8 @@ public class GroupByCompiler {
                         if (isFixed1 == isFixed2) {
                             // Not strictly necessary, but forces the order to match the schema
                             // column order (with PK columns before value columns).
-                            return o1.getColumnPosition() - o2.getColumnPosition();
+                            //return o1.getColumnPosition() - o2.getColumnPosition();
+                            return gb1.getFirst() - gb2.getFirst();
                         } else if (isFixed1) {
                             return -1;
                         } else {
@@ -237,8 +239,8 @@ public class GroupByCompiler {
                     }
                 }
             });
-            for (Entry groupByEntry : groupByEntries) {
-                expressions.add(groupByEntry.getExpression());
+            for (Pair<Integer,Expression> groupBy : groupBys) {
+                expressions.add(groupBy.getSecond());
             }
             for (int i = expressions.size()-2; i >= 0; i--) {
                 Expression expression = expressions.get(i);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
index d8e86ad..f0406d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
@@ -24,17 +24,15 @@ import java.util.LinkedHashSet;
 import java.util.List;
 
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
-import org.apache.phoenix.compile.TrackOrderPreservingExpressionCompiler.Ordering;
+import org.apache.phoenix.compile.OrderPreservingTracker.Ordering;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.OrderByExpression;
-import org.apache.phoenix.parse.ColumnParseNode;
 import org.apache.phoenix.parse.LiteralParseNode;
 import org.apache.phoenix.parse.OrderByNode;
 import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.SelectStatement;
-import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTableType;
@@ -90,13 +88,12 @@ public class OrderByCompiler {
         if (orderByNodes.isEmpty()) {
             return OrderBy.EMPTY_ORDER_BY;
         }
+        ExpressionCompiler compiler = new ExpressionCompiler(context, groupBy);
         // accumulate columns in ORDER BY
-        TrackOrderPreservingExpressionCompiler visitor = 
-                new TrackOrderPreservingExpressionCompiler(context, groupBy, 
-                        orderByNodes.size(), Ordering.ORDERED, null);
+        OrderPreservingTracker tracker = 
+                new OrderPreservingTracker(context, groupBy, Ordering.ORDERED, orderByNodes.size());
         LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
         for (OrderByNode node : orderByNodes) {
-            boolean isAscending = node.isAscending();
             ParseNode parseNode = node.getNode();
             Expression expression = null;
             if (parseNode instanceof LiteralParseNode && ((LiteralParseNode)parseNode).getType() == PInteger.INSTANCE){
@@ -104,24 +101,13 @@ public class OrderByCompiler {
                 int size = projector.getColumnProjectors().size();
                 if (index > size || index <= 0 ) {
                     throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND)
-                    .setMessage("").build().buildException();
+                    .build().buildException();
                 }
-                ColumnProjector colProj = projector.getColumnProjector(index-1);
-                TableName  tableName = null;
-                if (statement.getSelects().size() > 0 )
-                    tableName = TableName.create(context.getCurrentTable().getTable().getName().toString(), null);
-                else {
-                    tableName =  TableName.create(context.getResolver().getTables().get(0).getTable().getSchemaName().toString(), 
-                            context.getResolver().getTables().get(0).getTable().getTableName().toString());
-                }
-                ColumnParseNode colParseNode = new ColumnParseNode(tableName, colProj.getName(), null);
-                expression = colParseNode.accept(visitor);
+                expression = projector.getColumnProjector(index-1).getExpression();
             } else {
-                expression = node.getNode().accept(visitor);
-            }
-            if (!expression.isStateless() && visitor.addEntry(expression, isAscending ? SortOrder.ASC : SortOrder.DESC)) {
+                expression = node.getNode().accept(compiler);
                 // Detect mix of aggregate and non aggregates (i.e. ORDER BY txns, SUM(txns)
-                if (!visitor.isAggregate()) {
+                if (!expression.isStateless() && !compiler.isAggregate()) {
                     if (statement.isAggregate() || statement.isDistinct()) {
                         // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x
                         if (statement.isDistinct()) {
@@ -131,21 +117,29 @@ public class OrderByCompiler {
                         ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                     }
                 }
+            }
+            if (!expression.isStateless()) {
+                boolean isAscending = node.isAscending();
+                boolean isNullsLast = node.isNullsLast();
+                tracker.track(expression, isAscending ? SortOrder.ASC : SortOrder.DESC, isNullsLast);
+                // FIXME: this isn't correct. If we have a schema where column A is DESC,
+                // an ORDER BY A should still be ASC.
                 if (expression.getSortOrder() == SortOrder.DESC) {
                     isAscending = !isAscending;
+                    isNullsLast = !isNullsLast;
                 }
-                OrderByExpression orderByExpression = new OrderByExpression(expression, node.isNullsLast(), isAscending);
+                OrderByExpression orderByExpression = new OrderByExpression(expression, isNullsLast, isAscending);
                 orderByExpressions.add(orderByExpression);
             }
-            visitor.reset();
+            compiler.reset();
         }
        
         if (orderByExpressions.isEmpty()) {
             return OrderBy.EMPTY_ORDER_BY;
         }
         // If we're ordering by the order returned by the scan, we don't need an order by
-        if (isInRowKeyOrder && visitor.isOrderPreserving()) {
-            if (visitor.isReverse()) {
+        if (isInRowKeyOrder && tracker.isOrderPreserving()) {
+            if (tracker.isReverse()) {
                 // Don't use reverse scan if we're using a skip scan, as our skip scan doesn't support this yet.
                 // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it for such table types.
                 if (context.getConnection().getQueryServices().getProps().getBoolean(QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN)
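
To make the compiler's behavior concrete, a hedged sketch (the table is assumed; the plan text follows the format asserted in the ITs above): an ORDER BY that matches row-key order compiles to OrderBy.EMPTY_ORDER_BY, a fully reversed one is eligible for a reverse scan, and one that skips a leading PK column still needs a real sort.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class OrderPreservingSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE IF NOT EXISTS t (k1 INTEGER NOT NULL, "
                        + "k2 INTEGER NOT NULL, v VARCHAR "
                        + "CONSTRAINT pk PRIMARY KEY (k1, k2))");
                // Matches row-key order: no CLIENT SORTED BY step in the plan
                printPlan(conn, "SELECT * FROM t ORDER BY k1, k2");
                // Fully reversed: may be served by a reverse scan instead of a sort
                printPlan(conn, "SELECT * FROM t ORDER BY k1 DESC, k2 DESC");
                // Gap in the PK (k1 skipped): a client sort is unavoidable
                printPlan(conn, "SELECT * FROM t ORDER BY k2");
            }
        }

        private static void printPlan(Connection conn, String query) throws Exception {
            try (Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("EXPLAIN " + query)) {
                System.out.println(query);
                while (rs.next()) {
                    System.out.println("  " + rs.getString(1));
                }
            }
        }
    }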

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
new file mode 100644
index 0000000..1c31606
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.execute.TupleProjector;
+import org.apache.phoenix.expression.CoerceExpression;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.ProjectedColumnExpression;
+import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.expression.function.FunctionExpression.OrderPreserving;
+import org.apache.phoenix.expression.function.ScalarFunction;
+import org.apache.phoenix.expression.visitor.StatelessTraverseNoExpressionVisitor;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.SortOrder;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+
+/**
+ * Determines if the natural key order of the rows returned by the scan
+ * will match the order of the expressions. For GROUP BY, if order is preserved we can use
+ * an optimization during server-side aggregation to do the aggregation on-the-fly versus
+ * keeping track of each distinct group. We can only do this optimization if all the rows
+ * for each group will be contiguous. For ORDER BY, we can drop the ORDER BY statement if
+ * the order is preserved.
+ * 
+ */
+public class OrderPreservingTracker {
+    public enum Ordering {ORDERED, UNORDERED};
+
+    public static class Info {
+        public final OrderPreserving orderPreserving;
+        public final int pkPosition;
+        public final int slotSpan;
+
+        public Info(int pkPosition) {
+            this.pkPosition = pkPosition;
+            this.orderPreserving = OrderPreserving.YES;
+            this.slotSpan = 1;
+        }
+
+        public Info(Info info, OrderPreserving orderPreserving) {
+            this.pkPosition = info.pkPosition;
+            this.slotSpan = info.slotSpan;
+            this.orderPreserving = orderPreserving;
+        }
+
+        public Info(Info info, int slotSpan, OrderPreserving orderPreserving) {
+            this.pkPosition = info.pkPosition;
+            this.slotSpan = slotSpan;
+            this.orderPreserving = orderPreserving;
+        }
+    }
+    private final TrackOrderPreservingExpressionVisitor visitor;
+    private final GroupBy groupBy;
+    private final Ordering ordering;
+    private final int pkPositionOffset;
+    private final List<Info> orderPreservingInfos;
+    private boolean isOrderPreserving = true;
+    private Boolean isReverse = null;
+    
+    public OrderPreservingTracker(StatementContext context, GroupBy groupBy, Ordering ordering, int nNodes) {
+        this(context, groupBy, ordering, nNodes, null);
+    }
+    
+    public OrderPreservingTracker(StatementContext context, GroupBy groupBy, Ordering ordering, int nNodes, TupleProjector projector) {
+        int pkPositionOffset = 0;
+        if (groupBy.isEmpty()) { // FIXME: would the below table have any of these set in the case of a GROUP BY?
+            PTable table = context.getResolver().getTables().get(0).getTable();
+            boolean isSalted = table.getBucketNum() != null;
+            boolean isMultiTenant = context.getConnection().getTenantId() != null && table.isMultiTenant();
+            boolean isSharedViewIndex = table.getViewIndexId() != null;
+            // TODO: util for this offset, as it's computed in numerous places
+            pkPositionOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
+        }
+        this.pkPositionOffset = pkPositionOffset;
+        this.groupBy = groupBy;
+        this.visitor = new TrackOrderPreservingExpressionVisitor(projector);
+        this.orderPreservingInfos = Lists.newArrayListWithExpectedSize(nNodes);
+        this.ordering = ordering;
+    }
+    
+    public void track(Expression node) {
+        SortOrder sortOrder = node.getSortOrder();
+        track(node, sortOrder, sortOrder != SortOrder.getDefault());
+    }
+    
+    public void track(Expression node, SortOrder sortOrder, boolean isNullsLast) {
+        if (isOrderPreserving) {
+            Info info = node.accept(visitor);
+            if (info == null) {
+                isOrderPreserving = false;
+            } else {
+                // If the expression is sorted in a different order than the specified sort order
+                // then the expressions are not order preserving.
+                if (node.getSortOrder() != sortOrder) {
+                    if (isReverse == null) {
+                        isReverse = true;
+                        /*
+                         * When a GROUP BY is not order preserving, we cannot do a reverse
+                         * scan to eliminate the ORDER BY since our server-side scan is not
+                         * ordered in that case.
+                         */
+                        if (!groupBy.isEmpty() && !groupBy.isOrderPreserving()) {
+                            isOrderPreserving = false;
+                            return;
+                        }
+                    } else if (!isReverse){
+                        isOrderPreserving = false;
+                        return;
+                    }
+                } else {
+                    if (isReverse == null) {
+                        isReverse = false;
+                    } else if (isReverse){
+                        isOrderPreserving = false;
+                        return;
+                    }
+                }
+                if (node.isNullable()) {
+                    if (!Boolean.valueOf(isNullsLast).equals(isReverse)) {
+                        isOrderPreserving = false;
+                        return;
+                    }
+                }
+                orderPreservingInfos.add(info);
+            }
+        }
+    }
+    
+    public boolean isOrderPreserving() {
+        if (!isOrderPreserving) {
+            return false;
+        }
+        if (ordering == Ordering.UNORDERED) {
+            // Sort by position
+            Collections.sort(orderPreservingInfos, new Comparator<Info>() {
+                @Override
+                public int compare(Info o1, Info o2) {
+                    return o1.pkPosition-o2.pkPosition;
+                }
+            });
+        }
+        // Determine whether there are any gaps in the PK columns. If there are none, we
+        // don't need to sort in the coprocessor because the keys will already naturally
+        // be in sorted order.
+        int prevSlotSpan = 1;
+        int prevPos = pkPositionOffset - 1;
+        OrderPreserving prevOrderPreserving = OrderPreserving.YES;
+        for (int i = 0; i < orderPreservingInfos.size() && isOrderPreserving; i++) {
+            Info entry = orderPreservingInfos.get(i);
+            int pos = entry.pkPosition;
+            isOrderPreserving &= (entry.orderPreserving != OrderPreserving.NO) && (pos == prevPos || ((pos - prevSlotSpan == prevPos) && (prevOrderPreserving == OrderPreserving.YES)));
+            prevPos = pos;
+            prevSlotSpan = entry.slotSpan;
+            prevOrderPreserving = entry.orderPreserving;
+        }
+        return isOrderPreserving;
+    }
+    
+    public boolean isReverse() {
+        return Boolean.TRUE.equals(isReverse);
+    }
+
+    private static class TrackOrderPreservingExpressionVisitor extends StatelessTraverseNoExpressionVisitor<Info> {
+        private final TupleProjector projector;
+        
+        public TrackOrderPreservingExpressionVisitor(TupleProjector projector) {
+            this.projector = projector;
+        }
+        
+        @Override
+        public Info visit(RowKeyColumnExpression node) {
+            return new Info(node.getPosition());
+        }
+
+        @Override
+        public Info visit(ProjectedColumnExpression node) {
+            if (projector == null) {
+                return super.visit(node);
+            }
+            Expression expression = projector.getExpressions()[node.getPosition()];
+            // FIXME: prevents infinite recursion for union all in subquery, but
+            // should a ProjectedColumnExpression be used in this case? Wouldn't
+            // it make more sense to not create this wrapper in this case?
+            if (expression == node) {
+                return super.visit(node);
+            }
+            return expression.accept(this);
+        }
+
+        @Override
+        public Iterator<Expression> visitEnter(ScalarFunction node) {
+            return node.preservesOrder() == OrderPreserving.NO ? Iterators.<Expression> emptyIterator() : Iterators
+                    .singletonIterator(node.getChildren().get(node.getKeyFormationTraversalIndex()));
+        }
+
+        @Override
+        public Info visitLeave(ScalarFunction node, List<Info> l) {
+            if (l.isEmpty()) { return null; }
+            Info info = l.get(0);
+            // Keep the minimum value between this function and the current value,
+            // so that we never increase OrderPreserving from NO or YES_IF_LAST.
+            OrderPreserving orderPreserving = OrderPreserving.values()[Math.min(node.preservesOrder().ordinal(), info.orderPreserving.ordinal())];
+            if (orderPreserving == info.orderPreserving) {
+                return info;
+            }
+            return new Info(info, orderPreserving);
+        }
+
+        @Override
+        public Iterator<Expression> visitEnter(CoerceExpression node) {
+            return node.getChildren().iterator();
+        }
+
+        @Override
+        public Info visitLeave(CoerceExpression node, List<Info> l) {
+            if (l.isEmpty()) { return null; }
+            return l.get(0);
+        }
+        
+        @Override
+        public Iterator<Expression> visitEnter(RowValueConstructorExpression node) {
+            return node.getChildren().iterator();
+        }
+
+        @Override
+        public Info visitLeave(RowValueConstructorExpression node, List<Info> l) {
+            // Child expression returned null and was filtered, so not order preserving
+            if (l.size() != node.getChildren().size()) { return null; }
+            Info firstInfo = l.get(0);
+            Info lastInfo = firstInfo;
+            // Check that pkPos are consecutive which is the only way a RVC can be order preserving
+            for (int i = 1; i < l.size(); i++) {
+                // not order preserving since it's not last
+                if (lastInfo.orderPreserving == OrderPreserving.YES_IF_LAST) { return null; }
+                Info info = l.get(i);
+                // not order preserving since there's a gap in the pk
+                if (info.pkPosition != lastInfo.pkPosition + 1) { return null; }
+                lastInfo = info;
+            }
+            return new Info(firstInfo, l.size(), lastInfo.orderPreserving);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
deleted file mode 100644
index 9fd6837..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.compile;
-
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
-import org.apache.phoenix.execute.TupleProjector;
-import org.apache.phoenix.expression.Expression;
-import org.apache.phoenix.expression.LiteralExpression;
-import org.apache.phoenix.expression.RowKeyColumnExpression;
-import org.apache.phoenix.expression.function.FunctionExpression;
-import org.apache.phoenix.expression.function.FunctionExpression.OrderPreserving;
-import org.apache.phoenix.parse.CaseParseNode;
-import org.apache.phoenix.parse.ColumnParseNode;
-import org.apache.phoenix.parse.DivideParseNode;
-import org.apache.phoenix.parse.MultiplyParseNode;
-import org.apache.phoenix.parse.SubtractParseNode;
-import org.apache.phoenix.schema.ColumnRef;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.schema.SortOrder;
-import com.google.common.collect.Lists;
-
-/**
- * Visitor that builds the expressions of a GROUP BY and ORDER BY clause. While traversing
- * the parse node tree, the visitor also determines if the natural key order of the scan
- * will match the order of the expressions. For GROUP BY, if order is preserved we can use
- * an optimization during server-side aggregation to do the aggregation on-the-fly versus
- * keeping track of each distinct group. We can only do this optimization if all the rows
- * for each group will be contiguous. For ORDER BY, we can drop the ORDER BY statement if
- * the order is preserved.
- * 
- */
-public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
-    public enum Ordering {ORDERED, UNORDERED};
-    
-    private final List<Entry> entries;
-    private final Ordering ordering;
-    private final int positionOffset;
-    private final TupleProjector tupleProjector; // for derived-table query compilation
-    private OrderPreserving orderPreserving = OrderPreserving.YES;
-    private ColumnRef columnRef;
-    private boolean isOrderPreserving = true;
-    private Boolean isReverse;
-    
-    TrackOrderPreservingExpressionCompiler(StatementContext context, GroupBy groupBy, int expectedEntrySize, Ordering ordering, TupleProjector tupleProjector) {
-        super(context, groupBy);
-        PTable table = context.getResolver().getTables().get(0).getTable();
-        boolean isSalted = table.getBucketNum() != null;
-        boolean isMultiTenant = context.getConnection().getTenantId() != null && table.isMultiTenant();
-        boolean isSharedViewIndex = table.getViewIndexId() != null;
-        // TODO: util for this offset, as it's computed in numerous places
-        positionOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
-        entries = Lists.newArrayListWithExpectedSize(expectedEntrySize);
-        this.ordering = ordering;
-        this.tupleProjector = tupleProjector;
-    }
-    
-    public Boolean isReverse() {
-        return isReverse;
-    }
-
-    public boolean isOrderPreserving() {
-        if (!isOrderPreserving) {
-            return false;
-        }
-        if (ordering == Ordering.UNORDERED) {
-            // Sort by position
-            Collections.sort(entries, new Comparator<Entry>() {
-                @Override
-                public int compare(Entry o1, Entry o2) {
-                    return o1.getPkPosition()-o2.getPkPosition();
-                }
-            });
-        }
-        // Determine if there are any gaps in the PK columns (in which case we don't need
-        // to sort in the coprocessor because the keys will already naturally be in sorted
-        // order.
-        int prevPos = positionOffset - 1;
-        OrderPreserving prevOrderPreserving = OrderPreserving.YES;
-        for (int i = 0; i < entries.size() && isOrderPreserving; i++) {
-            Entry entry = entries.get(i);
-            int pos = entry.getPkPosition();
-            isOrderPreserving &= (entry.getOrderPreserving() != OrderPreserving.NO) && (pos == prevPos || ((pos - 1 == prevPos) && (prevOrderPreserving == OrderPreserving.YES)));
-            prevPos = pos;
-            prevOrderPreserving = entries.get(i).getOrderPreserving();
-        }
-        return isOrderPreserving;
-    }
-    
-    @Override
-    protected Expression addExpression(Expression expression) {
-        // TODO: have FunctionExpression visitor instead and remove this cast
-        if (expression instanceof FunctionExpression) {
-            // Keep the minimum value between this function and the current value,
-            // so that we never increase OrderPreserving from NO or YES_IF_LAST.
-            orderPreserving = OrderPreserving.values()[Math.min(orderPreserving.ordinal(), ((FunctionExpression)expression).preservesOrder().ordinal())];
-        }
-        return super.addExpression(expression);
-    }
-
-    @Override
-    public boolean visitEnter(CaseParseNode node) throws SQLException {
-        orderPreserving = OrderPreserving.NO;
-        return super.visitEnter(node);
-    }
-    
-    @Override
-    public boolean visitEnter(DivideParseNode node) throws SQLException {
-        // A divide expression may not preserve row order.
-        // For example: GROUP BY 1/x
-        orderPreserving = OrderPreserving.NO;
-        return super.visitEnter(node);
-    }
-
-    @Override
-    public boolean visitEnter(SubtractParseNode node) throws SQLException {
-        // A subtract expression may not preserve row order.
-        // For example: GROUP BY 10 - x
-        orderPreserving = OrderPreserving.NO;
-        return super.visitEnter(node);
-    }
-
-    @Override
-    public boolean visitEnter(MultiplyParseNode node) throws SQLException {
-        // A multiply expression may not preserve row order.
-        // For example: GROUP BY -1 * x
-        orderPreserving = OrderPreserving.NO;
-        return super.visitEnter(node);
-    }
-
-    @Override
-    public void reset() {
-        super.reset();
-        columnRef = null;
-        orderPreserving = OrderPreserving.YES;
-    }
-    
-    @Override
-    protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException {
-        ColumnRef ref = super.resolveColumn(node);
-        // If we encounter any non PK column, then we can't aggregate on-the-fly
-        // because the distinct groups have no correlation to the KV column value
-        if (getColumnPKPosition(ref) < 0) {
-            orderPreserving = OrderPreserving.NO;
-        }
-        
-        if (columnRef == null) {
-            columnRef = ref;
-        } else if (!columnRef.equals(ref)) {
-            // If we encounter more than one column reference in an expression,
-            // we can't assume the result of the expression will be key ordered.
-            // For example GROUP BY a * b
-            orderPreserving = OrderPreserving.NO;
-        }
-        return ref;
-    }
-    
-    private int getColumnPKPosition(ColumnRef ref) {
-        if (tupleProjector != null && ref.getTable().getType() == PTableType.SUBQUERY) {
-            Expression expression = tupleProjector.getExpressions()[ref.getColumnPosition()];
-            if (expression instanceof RowKeyColumnExpression) {
-                return ((RowKeyColumnExpression) expression).getPosition();
-            }
-        }
-        
-        return ref.getPKSlotPosition();
-    }
-
-    public boolean addEntry(Expression expression) {
-        if (expression instanceof LiteralExpression) {
-            return false;
-        }
-        isOrderPreserving &= (orderPreserving != OrderPreserving.NO);
-        entries.add(new Entry(expression, columnRef, orderPreserving));
-        return true;
-    }
-    
-    public boolean addEntry(Expression expression, SortOrder sortOrder) {
-        // If the expression is sorted in a different order than the specified sort order
-        // then the expressions are not order preserving.
-        if (expression.getSortOrder() != sortOrder) {
-            if (isReverse == null) {
-                isReverse = true;
-            } else if (!isReverse){
-                orderPreserving = OrderPreserving.NO;
-            }
-        } else {
-            if (isReverse == null) {
-                isReverse = false;
-            } else if (isReverse){
-                orderPreserving = OrderPreserving.NO;
-            }
-        }
-        return addEntry(expression);
-    }
-    
-    public List<Entry> getEntries() {
-        return entries;
-    }
-
-    public class Entry {
-        private final Expression expression;
-        private final ColumnRef columnRef;
-        private final OrderPreserving orderPreserving;
-        
-        private Entry(Expression expression, ColumnRef columnRef, OrderPreserving orderPreserving) {
-            this.expression = expression;
-            this.columnRef = columnRef;
-            this.orderPreserving = orderPreserving;
-        }
-
-        public Expression getExpression() {
-            return expression;
-        }
-
-        public int getPkPosition() {
-            return getColumnPKPosition(columnRef);
-        }
-
-        public int getColumnPosition() {
-            return columnRef.getColumnPosition();
-        }
-
-        public OrderPreserving getOrderPreserving() {
-            return orderPreserving;
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 77c1f9e..77eb237 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -41,6 +41,7 @@ import java.util.Properties;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.expression.Expression;
@@ -52,6 +53,7 @@ import org.apache.phoenix.expression.function.TimeUnit;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.AmbiguousColumnException;
@@ -1450,8 +1452,8 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("CREATE TABLE t (k1 varchar, k2 varchar, v varchar, constraint pk primary key(k1,k2))");
         ResultSet rs;
         String[] queries = {
-                "SELECT DISTINCT v FROM T ORDER BY v LIMIT 3",
-                "SELECT v FROM T GROUP BY v,k1 ORDER BY v LIMIT 3",
+//                "SELECT DISTINCT v FROM T ORDER BY v LIMIT 3",
+//                "SELECT v FROM T GROUP BY v,k1 ORDER BY v LIMIT 3",
                 "SELECT DISTINCT count(*) FROM T GROUP BY k1 LIMIT 3",
                 "SELECT count(1) FROM T GROUP BY v,k1 LIMIT 3",
                 "SELECT max(v) FROM T GROUP BY k1,k2 HAVING count(k1) > 1 LIMIT 3",
@@ -1461,7 +1463,8 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
         for (int i = 0; i < queries.length; i++) {
             query = queries[i];
             rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-            assertFalse("Did not expected to find GROUP BY limit optimization in: " + query, QueryUtil.getExplainPlan(rs).contains(" LIMIT 3 GROUPS"));
+            String explainPlan = QueryUtil.getExplainPlan(rs);
+            assertFalse("Did not expected to find GROUP BY limit optimization in: " + query, explainPlan.contains(" LIMIT 3 GROUPS"));
         }
     }
     
@@ -1631,4 +1634,103 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
     }
 
    
+    @Test
+    public void testOrderByOrderPreservingFwd() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))");
+        String[] queries = {
+                "SELECT * FROM T ORDER BY (k1,k2), k3",
+                "SELECT * FROM T ORDER BY k1,k2,k3",
+                "SELECT * FROM T ORDER BY k1,k2",
+                "SELECT * FROM T ORDER BY k1",
+                "SELECT * FROM T ORDER BY CAST(k1 AS TIMESTAMP)",
+                "SELECT * FROM T ORDER BY (k1,k2,k3)",
+                "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY'), CEIL(k2, 'HOUR')",
+                "SELECT * FROM T ORDER BY INVERT(k1) DESC",
+                };
+        String query;
+        for (int i = 0; i < queries.length; i++) {
+            query = queries[i];
+            QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+            assertTrue("Expected order by to be compiled out: " + query, plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY);
+        }
+    }
+    
+    @Test
+    public void testOrderByOrderPreservingRev() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2 DESC,k3))");
+        String[] queries = {
+                "SELECT * FROM T ORDER BY INVERT(k1),k2",
+                "SELECT * FROM T ORDER BY INVERT(k1)",
+                 "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY') DESC, CEIL(k2, 'HOUR') DESC",
+                "SELECT * FROM T ORDER BY k1 DESC",
+                };
+        String query;
+        for (int i = 0; i < queries.length; i++) {
+            query = queries[i];
+            QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+            assertTrue("Expected order by to be compiled out: " + query, plan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY);
+        }
+    }
+    
+    @Test
+    public void testNotOrderByOrderPreserving() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))");
+        String[] queries = {
+                "SELECT * FROM T ORDER BY k1,k3",
+                "SELECT * FROM T ORDER BY SUBSTR(TO_CHAR(k1),1,4)",
+                "SELECT * FROM T ORDER BY k2",
+                "SELECT * FROM T ORDER BY INVERT(k1),k3",
+                "SELECT * FROM T ORDER BY CASE WHEN k1 = CURRENT_DATE() THEN 0 ELSE 1 END",
+                "SELECT * FROM T ORDER BY TO_CHAR(k1)",
+                };
+        String query;
+        for (int i = 0; i < queries.length; i++) {
+            query = queries[i];
+            QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+            assertFalse("Expected order by not to be compiled out: " + query, plan.getOrderBy().getOrderByExpressions().isEmpty());
+        }
+    }
+    
+    @Test
+    public void testGroupByOrderPreserving() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))");
+        String[] queries = {
+                "SELECT 1 FROM T GROUP BY k3, (k1,k2)",
+                "SELECT 1 FROM T GROUP BY k2,k1,k3",
+                "SELECT 1 FROM T GROUP BY k1,k2",
+                "SELECT 1 FROM T GROUP BY k1",
+                "SELECT 1 FROM T GROUP BY CAST(k1 AS TIMESTAMP)",
+                "SELECT 1 FROM T GROUP BY (k1,k2,k3)",
+                "SELECT 1 FROM T GROUP BY TRUNC(k2, 'DAY'), CEIL(k1, 'HOUR')",
+                };
+        String query;
+        for (int i = 0; i < queries.length; i++) {
+            query = queries[i];
+            QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+            assertTrue("Expected group by to be order preserving: " + query, plan.getGroupBy().isOrderPreserving());
+        }
+    }
+    
+    @Test
+    public void testNotGroupByOrderPreserving() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))");
+        String[] queries = {
+                "SELECT 1 FROM T GROUP BY k1,k3",
+                "SELECT 1 FROM T GROUP BY k2",
+                "SELECT 1 FROM T GROUP BY INVERT(k1),k3",
+                "SELECT 1 FROM T GROUP BY CASE WHEN k1 = CURRENT_DATE() THEN 0 ELSE 1 END",
+                "SELECT 1 FROM T GROUP BY TO_CHAR(k1)",
+                };
+        String query;
+        for (int i = 0; i < queries.length; i++) {
+            query = queries[i];
+            QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+            assertFalse("Expected group by not to be order preserving: " + query, plan.getGroupBy().isOrderPreserving());
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2f0b51cb/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
index 67c44bd..cd51683 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.compile;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.sql.Array;
 import java.sql.Connection;
@@ -33,7 +34,6 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
@@ -84,9 +84,8 @@ public class QueryOptimizerTest extends BaseConnectionlessQueryTest {
         try{ 
             conn.createStatement().execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true");
             PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
-            QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY 1,2,3");
-        } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND.getErrorCode(), e.getErrorCode());
+            QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY 'a','b','c'");
+            assertTrue(plan.getOrderBy().getOrderByExpressions().isEmpty());
         } finally {
             conn.close();
         }


[15/50] [abbrv] phoenix git commit: PHOENIX-1748 Applying TRUNC|ROUND|FLOOR|CEIL on TIMESTAMP should maintain return type of TIMESTAMP (Dave Hacker)

Posted by ma...@apache.org.
PHOENIX-1748 Applying TRUNC|ROUND|FLOOR|CEIL on TIMESTAMP should maintain return type of TIMESTAMP (Dave Hacker)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f766a780
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f766a780
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f766a780

Branch: refs/heads/calcite
Commit: f766a780c2941ec30911989de5c28852ebfeb9bf
Parents: 1e28061
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Thu Apr 2 11:51:04 2015 -0700
Committer: Thomas D'Silva <tw...@gmail.com>
Committed: Thu Apr 2 11:51:04 2015 -0700

----------------------------------------------------------------------
 .../RoundFloorCeilFunctionsEnd2EndIT.java       | 114 +++++++++++++++++++
 1 file changed, 114 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f766a780/phoenix-core/src/it/java/org/apache/phoenix/end2end/RoundFloorCeilFunctionsEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RoundFloorCeilFunctionsEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RoundFloorCeilFunctionsEnd2EndIT.java
index 2cf08e9..42635c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RoundFloorCeilFunctionsEnd2EndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RoundFloorCeilFunctionsEnd2EndIT.java
@@ -29,10 +29,12 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.Time;
 import java.sql.Timestamp;
+import java.util.Properties;
 
 import org.apache.phoenix.expression.function.CeilFunction;
 import org.apache.phoenix.expression.function.FloorFunction;
 import org.apache.phoenix.expression.function.RoundFunction;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.DateUtil;
 import org.junit.Before;
 import org.junit.Test;
@@ -439,5 +441,117 @@ public class RoundFloorCeilFunctionsEnd2EndIT extends BaseHBaseManagedTimeIT {
 		assertEquals(0, Floats.compare(1.26f, rs.getFloat(3)));
 		assertEquals(0, Floats.compare(1.264f, rs.getFloat(4)));
 	}	
+	
+	@Test
+	public void testTimestampAggregateFunctions() throws Exception {
+		String dateString = "2015-03-08 09:09:11.665";
+		Properties props = new Properties();
+		props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, "GMT+1");
+		Connection conn = DriverManager.getConnection(getUrl(), props);
+		try {
+			conn.prepareStatement(
+					"create table TIME_AGG_TABLE("
+							+ "ID unsigned_int NOT NULL, "
+							+ "THE_DATE TIMESTAMP, "
+							+ "constraint PK primary key (ID))").execute();
+			PreparedStatement stmt = conn.prepareStatement("upsert into "
+					+ "TIME_AGG_TABLE(" + "    ID, " + "    THE_DATE)"
+					+ "VALUES (?, ?)");
+			stmt.setInt(1, 1);
+			stmt.setTimestamp(2, DateUtil.parseTimestamp(dateString));
+			stmt.execute();
+			conn.commit();
+
+			ResultSet rs = conn.prepareStatement(
+					"SELECT THE_DATE ,TRUNC(THE_DATE,'DAY') AS day_from_dt "
+							+ ",TRUNC(THE_DATE,'HOUR') AS hour_from_dt "
+							+ ",TRUNC(THE_DATE,'MINUTE') AS min_from_dt "
+							+ ",TRUNC(THE_DATE,'SECOND') AS sec_from_dt "
+							+ ",TRUNC(THE_DATE,'MILLISECOND') AS mil_from_dt "
+							+ "FROM TIME_AGG_TABLE").executeQuery();
+			assertTrue(rs.next());
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("THE_DATE"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 00:00:00.0"),
+					rs.getTimestamp("day_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:00:00.0"),
+					rs.getTimestamp("hour_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:00.0"),
+					rs.getTimestamp("min_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.0"),
+					rs.getTimestamp("sec_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("mil_from_dt"));
+			rs.close();
+
+			rs = conn.prepareStatement(
+					"SELECT THE_DATE ,ROUND(THE_DATE,'DAY') AS day_from_dt "
+							+ ",ROUND(THE_DATE,'HOUR') AS hour_from_dt "
+							+ ",ROUND(THE_DATE,'MINUTE') AS min_from_dt "
+							+ ",ROUND(THE_DATE,'SECOND') AS sec_from_dt "
+							+ ",ROUND(THE_DATE,'MILLISECOND') AS mil_from_dt "
+							+ "FROM TIME_AGG_TABLE").executeQuery();
+			assertTrue(rs.next());
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("THE_DATE"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 00:00:00.0"),
+					rs.getTimestamp("day_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:00:00.0"),
+					rs.getTimestamp("hour_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:00.0"),
+					rs.getTimestamp("min_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:12.0"),
+					rs.getTimestamp("sec_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("mil_from_dt"));
+			rs.close();
+
+			rs = conn.prepareStatement(
+					"SELECT THE_DATE ,FLOOR(THE_DATE,'DAY') AS day_from_dt "
+							+ ",FLOOR(THE_DATE,'HOUR') AS hour_from_dt "
+							+ ",FLOOR(THE_DATE,'MINUTE') AS min_from_dt "
+							+ ",FLOOR(THE_DATE,'SECOND') AS sec_from_dt "
+							+ ",FLOOR(THE_DATE,'MILLISECOND') AS mil_from_dt "
+							+ "FROM TIME_AGG_TABLE").executeQuery();
+			assertTrue(rs.next());
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("THE_DATE"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 00:00:00.0"),
+					rs.getTimestamp("day_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:00:00.0"),
+					rs.getTimestamp("hour_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:00.0"),
+					rs.getTimestamp("min_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.0"),
+					rs.getTimestamp("sec_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("mil_from_dt"));
+			rs.close();
+
+			rs = conn.prepareStatement(
+					"SELECT THE_DATE ,CEIL(THE_DATE,'DAY') AS day_from_dt "
+							+ ",CEIL(THE_DATE,'HOUR') AS hour_from_dt "
+							+ ",CEIL(THE_DATE,'MINUTE') AS min_from_dt "
+							+ ",CEIL(THE_DATE,'SECOND') AS sec_from_dt "
+							+ ",CEIL(THE_DATE,'MILLISECOND') AS mil_from_dt "
+							+ "FROM TIME_AGG_TABLE").executeQuery();
+			assertTrue(rs.next());
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("THE_DATE"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-09 00:00:00.0"),
+					rs.getTimestamp("day_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 10:00:00.0"),
+					rs.getTimestamp("hour_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:10:00.0"),
+					rs.getTimestamp("min_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:12.0"),
+					rs.getTimestamp("sec_from_dt"));
+			assertEquals(DateUtil.parseTimestamp("2015-03-08 09:09:11.665"),
+					rs.getTimestamp("mil_from_dt"));
+			rs.close();
+		} finally {
+			conn.close();
+		}
+	}
 
 }


[21/50] [abbrv] phoenix git commit: PHOENIX-1071 Add phoenix-spark for Spark integration

Posted by ma...@apache.org.
PHOENIX-1071 Add phoenix-spark for Spark integration


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f2d9080d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f2d9080d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f2d9080d

Branch: refs/heads/calcite
Commit: f2d9080d25006d7737286919498fa1999c9bf78c
Parents: 742ca13
Author: ravimagham <ra...@apache.org>
Authored: Sun Apr 5 01:05:12 2015 -0700
Committer: ravimagham <ra...@apache.org>
Committed: Sun Apr 5 01:05:12 2015 -0700

----------------------------------------------------------------------
 NOTICE                                          |   5 +
 phoenix-spark/README.md                         |  89 ++++
 phoenix-spark/pom.xml                           | 523 +++++++++++++++++++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 166 ++++++
 .../phoenix/spark/PhoenixRecordWritable.scala   |  89 ++++
 .../phoenix/spark/ProductRDDFunctions.scala     |  68 +++
 .../phoenix/spark/SparkContextFunctions.scala   |  41 ++
 .../spark/SparkSqlContextFunctions.scala        |  39 ++
 .../org/apache/phoenix/spark/package.scala      |  32 ++
 phoenix-spark/src/test/resources/log4j.xml      |  41 ++
 phoenix-spark/src/test/resources/setup.sql      |  18 +
 .../apache/phoenix/spark/PhoenixRDDTest.scala   | 333 ++++++++++++
 pom.xml                                         |   1 +
 13 files changed, 1445 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/NOTICE
----------------------------------------------------------------------
diff --git a/NOTICE b/NOTICE
index 0836479..0bd2251 100644
--- a/NOTICE
+++ b/NOTICE
@@ -23,3 +23,8 @@ Copyright (c) 2003-2008, Terrence Parr.
 
 JUnit (http://www.junit.org/) included under the Common Public License v1.0.
 See the full text here: http://junit.sourceforge.net/cpl-v10.html
+
+The phoenix-spark module has been adapted from the phoenix-spark library
+distributed under the terms of the Apache 2 license. Original source copyright:
+Copyright 2014 Simply Measured, Inc.
+Copyright 2015 Interset Software Inc.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/README.md
----------------------------------------------------------------------
diff --git a/phoenix-spark/README.md b/phoenix-spark/README.md
new file mode 100644
index 0000000..1c030f8
--- /dev/null
+++ b/phoenix-spark/README.md
@@ -0,0 +1,89 @@
+phoenix-spark extends Phoenix's MapReduce support to allow Spark to load Phoenix tables as RDDs or
+DataFrames, and enables persisting RDDs of Tuples back to Phoenix.
+
+## Reading Phoenix Tables
+
+Given a Phoenix table with the following DDL
+
+```sql
+CREATE TABLE TABLE1 (ID BIGINT NOT NULL PRIMARY KEY, COL1 VARCHAR);
+UPSERT INTO TABLE1 (ID, COL1) VALUES (1, 'test_row_1');
+UPSERT INTO TABLE1 (ID, COL1) VALUES (2, 'test_row_2');
+```
+
+### Load as a DataFrame
+```scala
+import org.apache.spark.SparkContext
+import org.apache.spark.sql.SQLContext
+import org.apache.phoenix.spark._
+
+val sc = new SparkContext("local", "phoenix-test")
+val sqlContext = new SQLContext(sc)
+
+// Load the columns 'ID' and 'COL1' from TABLE1 as a DataFrame
+val df = sqlContext.phoenixTableAsDataFrame(
+  "TABLE1", Array("ID", "COL1"), zkUrl = Some("phoenix-server:2181")
+)
+
+df.show
+```
+
+### Load as an RDD
+```scala
+import org.apache.spark.SparkContext
+import org.apache.spark.rdd.RDD
+import org.apache.phoenix.spark._
+
+val sc = new SparkContext("local", "phoenix-test")
+
+// Load the columns 'ID' and 'COL1' from TABLE1 as an RDD
+val rdd: RDD[Map[String, AnyRef]] = sc.phoenixTableAsRDD(
+  "TABLE1", Seq("ID", "COL1"), zkUrl = Some("phoenix-server:2181")
+)
+
+rdd.count()
+
+val firstId = rdd.first()("ID").asInstanceOf[Long]
+val firstCol = rdd.first()("COL1").asInstanceOf[String]
+```
+
+## Saving RDDs to Phoenix
+
+Given a Phoenix table with the following DDL
+
+```sql
+CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER);
+```
+
+`saveToPhoenix` is an implicit method on `RDD[Product]`, i.e. an RDD of Tuples. The data types must
+correspond to the Java types Phoenix supports (http://phoenix.apache.org/language/datatypes.html).
+
+```scala
+import org.apache.spark.SparkContext
+import org.apache.phoenix.spark._
+
+val sc = new SparkContext("local", "phoenix-test")
+val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
+
+sc
+  .parallelize(dataSet)
+  .saveToPhoenix(
+    "OUTPUT_TEST_TABLE",
+    Seq("ID","COL1","COL2"),
+    zkUrl = Some("phoenix-server:2181")
+  )
+```
+
+## Notes
+
+The functions `phoenixTableAsDataFrame`, `phoenixTableAsRDD` and `saveToPhoenix` all support
+optionally specifying a `conf` Hadoop configuration parameter with custom Phoenix client settings,
+as well as an optional `zkUrl` parameter for the Phoenix connection URL.
+
+If `zkUrl` isn't specified, it's assumed that the "hbase.zookeeper.quorum" property has been set
+in the `conf` parameter. Similarly, if no configuration is passed in, `zkUrl` must be specified.
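+
+For example, here's a minimal sketch of relying on `conf` instead of `zkUrl` (assuming the
+`conf` named parameter shown here matches the actual method signature):
+
+```scala
+import org.apache.hadoop.conf.Configuration
+import org.apache.spark.SparkContext
+import org.apache.phoenix.spark._
+
+val sc = new SparkContext("local", "phoenix-test")
+
+// Carry the ZooKeeper quorum on the Hadoop configuration rather than via zkUrl
+val hadoopConf = new Configuration()
+hadoopConf.set("hbase.zookeeper.quorum", "phoenix-server:2181")
+
+val rdd = sc.phoenixTableAsRDD("TABLE1", Seq("ID", "COL1"), conf = hadoopConf)
+```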
+
+## Limitations
+
+- No pushdown predicate support from Spark SQL (yet)
+- No support for aggregate or distinct functions (http://phoenix.apache.org/phoenix_mr.html)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
new file mode 100644
index 0000000..3312b09
--- /dev/null
+++ b/phoenix-spark/pom.xml
@@ -0,0 +1,523 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.phoenix</groupId>
+    <artifactId>phoenix</artifactId>
+    <version>4.4.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>phoenix-spark</artifactId>
+  <name>Phoenix - Spark</name>
+
+  <properties>
+    <spark.version>1.3.0</spark.version>
+    <scala.version>2.10.4</scala.version>
+    <scala.binary.version>2.10</scala.binary.version>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-core</artifactId>
+    </dependency>
+
+    <!-- Force import of Spark's servlet API for unit tests -->
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+      <version>3.0.1</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+      <version>${scala.version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.binary.version}</artifactId>
+      <version>2.2.2</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scalamock</groupId>
+      <artifactId>scalamock-scalatest-support_${scala.binary.version}</artifactId>
+      <version>3.1.4</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scala.binary.version}</artifactId>
+      <version>${spark.version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.xerial.snappy</groupId>
+      <artifactId>snappy-java</artifactId>
+      <version>1.1.1.6</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <version>${hadoop-two.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop-two.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop-two.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop-two.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.thrift</groupId>
+          <artifactId>thrift</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.thrift</groupId>
+          <artifactId>thrift</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.thrift</groupId>
+          <artifactId>thrift</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jruby</groupId>
+          <artifactId>jruby-complete</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-it</artifactId>
+      <version>${hbase.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+      </plugin>
+
+      <plugin>
+        <groupId>net.alchim31.maven</groupId>
+        <artifactId>scala-maven-plugin</artifactId>
+        <version>3.2.0</version>
+        <configuration>
+          <charset>${project.build.sourceEncoding}</charset>
+          <jvmArgs>
+            <jvmArg>-Xmx1024m</jvmArg>
+          </jvmArgs>
+          <scalaVersion>${scala.version}</scalaVersion>
+        </configuration>
+        <executions>
+          <execution>
+            <id>scala-compile-first</id>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>add-source</goal>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>scala-test-compile</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>testCompile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+        <version>1.0</version>
+        <configuration>
+          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
+          <junitxml>.</junitxml>
+          <filereports>WDF TestSuite.txt</filereports>
+        </configuration>
+        <executions>
+          <execution>
+            <id>test</id>
+            <phase>test</phase>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <parallel>true</parallel>
+              <tagsToExclude>Integration-Test</tagsToExclude>
+            </configuration>
+          </execution>
+          <execution>
+            <id>integration-test</id>
+            <phase>integration-test</phase>
+            <goals>
+              <goal>test</goal>
+            </goals>
+            <configuration>
+              <parallel>false</parallel>
+              <tagsToInclude>Integration-Test</tagsToInclude>
+              <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
new file mode 100644
index 0000000..b27f9f9
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -0,0 +1,166 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.io.NullWritable
+import org.apache.phoenix.mapreduce.PhoenixInputFormat
+import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil
+import org.apache.phoenix.schema.types._
+import org.apache.phoenix.util.ColumnInfo
+import org.apache.spark._
+import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
+import org.apache.spark.sql.types.{DataType, StructField, StructType}
+import org.apache.spark.sql.{Row, DataFrame, SQLContext}
+import org.apache.spark.sql.types._
+import scala.collection.JavaConverters._
+
+class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
+                 predicate: Option[String] = None, zkUrl: Option[String] = None,
+                 @transient conf: Configuration)
+  extends RDD[PhoenixRecordWritable](sc, Nil) with Logging {
+
+  @transient lazy val phoenixConf = {
+    getPhoenixConfiguration
+  }
+
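+  // The underlying Hadoop RDD that issues the actual Phoenix scan via PhoenixInputFormat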
+  val phoenixRDD = sc.newAPIHadoopRDD(phoenixConf,
+    classOf[PhoenixInputFormat[PhoenixRecordWritable]],
+    classOf[NullWritable],
+    classOf[PhoenixRecordWritable])
+
+  override protected def getPartitions: Array[Partition] = {
+    phoenixRDD.partitions
+  }
+
+  @DeveloperApi
+  override def compute(split: Partition, context: TaskContext) = {
+    phoenixRDD.compute(split, context).map(r => r._2)
+  }
+
+  def printPhoenixConfig(conf: Configuration): Unit = {
+    for (mapEntry <- conf.iterator().asScala) {
+      val k = mapEntry.getKey
+      val v = mapEntry.getValue
+
+      if (k.startsWith("phoenix")) {
+        println(s"$k = $v")
+      }
+    }
+  }
+
+  def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
+    val query = "SELECT %s FROM \"%s\"".format(
+      columns.map(f => "\"" + f + "\"").mkString(", "),
+      table
+    )
+
+    query + (predicate match {
+      case Some(p: String) => " WHERE " + p
+      case _ => ""
+    })
+  }
+
+  def getPhoenixConfiguration: Configuration = {
+
+    // A Hadoop Configuration is not serializable, so don't try to ship this one;
+    // clone it instead, because PhoenixConfigurationUtil mutates it.
+    val config = new Configuration(conf)
+
+    PhoenixConfigurationUtil.setInputQuery(config, buildSql(table, columns, predicate))
+    PhoenixConfigurationUtil.setSelectColumnNames(config, columns.mkString(","))
+    PhoenixConfigurationUtil.setInputTableName(config, "\"" + table + "\"")
+    PhoenixConfigurationUtil.setInputClass(config, classOf[PhoenixRecordWritable])
+
+    // Override the Zookeeper URL if present. Throw exception if no address given.
+    zkUrl match {
+      case Some(url) => config.set(HConstants.ZOOKEEPER_QUORUM, url )
+      case _ => {
+        if(config.get(HConstants.ZOOKEEPER_QUORUM) == null) {
+          throw new UnsupportedOperationException(
+            s"One of zkUrl or '${HConstants.ZOOKEEPER_QUORUM}' config property must be provided"
+          )
+        }
+      }
+    }
+
+    config
+  }
+
+  // Convert our PhoenixRDD to a DataFrame
+  def toDataFrame(sqlContext: SQLContext): DataFrame = {
+    val columnList = PhoenixConfigurationUtil
+      .getSelectColumnMetadataList(new Configuration(phoenixConf))
+      .asScala
+
+    val columnNames: Seq[String] = columnList.map(ci => {
+      ci.getDisplayName
+    })
+
+    // Lookup the Spark catalyst types from the Phoenix schema
+    val structFields = phoenixSchemaToCatalystSchema(columnList).toArray
+
+    // Create the data frame from the converted Spark schema
+    sqlContext.createDataFrame(map(pr => {
+      val values = pr.resultMap
+      val row = new GenericMutableRow(values.size)
+
+      columnNames.zipWithIndex.foreach {
+        case (columnName, i) => {
+          row.update(i, values(columnName))
+        }
+      }
+
+      row.asInstanceOf[Row]
+    }), new StructType(structFields))
+  }
+
+  def phoenixSchemaToCatalystSchema(columnList: Seq[ColumnInfo]) = {
+    columnList.map(ci => {
+      val structType = phoenixTypeToCatalystType(ci.getPDataType)
+      StructField(ci.getDisplayName, structType)
+    })
+  }
+
+  // Lookup table for Phoenix types to Spark catalyst types
+  def phoenixTypeToCatalystType(phoenixType: PDataType[_]): DataType = phoenixType match {
+    case t if t.isInstanceOf[PVarchar] || t.isInstanceOf[PChar] => StringType
+    case t if t.isInstanceOf[PLong] || t.isInstanceOf[PUnsignedLong] => LongType
+    case t if t.isInstanceOf[PInteger] || t.isInstanceOf[PUnsignedInt] => IntegerType
+    case t if t.isInstanceOf[PFloat] || t.isInstanceOf[PUnsignedFloat] => FloatType
+    case t if t.isInstanceOf[PDouble] || t.isInstanceOf[PUnsignedDouble] => DoubleType
+    case t if t.isInstanceOf[PDecimal] => DecimalType(None)
+    case t if t.isInstanceOf[PTimestamp] || t.isInstanceOf[PUnsignedTimestamp] => TimestampType
+    case t if t.isInstanceOf[PTime] || t.isInstanceOf[PUnsignedTime] => TimestampType
+    case t if t.isInstanceOf[PDate] || t.isInstanceOf[PUnsignedDate] => TimestampType
+    case t if t.isInstanceOf[PBoolean] => BooleanType
+    case t if t.isInstanceOf[PVarbinary] || t.isInstanceOf[PBinary] => BinaryType
+    case t if t.isInstanceOf[PIntegerArray] || t.isInstanceOf[PUnsignedIntArray] => ArrayType(IntegerType, containsNull = true)
+    case t if t.isInstanceOf[PBooleanArray] => ArrayType(BooleanType, containsNull = true)
+    case t if t.isInstanceOf[PVarcharArray] || t.isInstanceOf[PCharArray] => ArrayType(StringType, containsNull = true)
+    case t if t.isInstanceOf[PVarbinaryArray] || t.isInstanceOf[PBinaryArray] => ArrayType(BinaryType, containsNull = true)
+    case t if t.isInstanceOf[PLongArray] || t.isInstanceOf[PUnsignedLongArray] => ArrayType(LongType, containsNull = true)
+    case t if t.isInstanceOf[PSmallintArray] || t.isInstanceOf[PUnsignedSmallintArray] => ArrayType(IntegerType, containsNull = true)
+    case t if t.isInstanceOf[PTinyintArray] || t.isInstanceOf[PUnsignedTinyintArray] => ArrayType(ByteType, containsNull = true)
+    case t if t.isInstanceOf[PFloatArray] || t.isInstanceOf[PUnsignedFloatArray] => ArrayType(FloatType, containsNull = true)
+    case t if t.isInstanceOf[PDoubleArray] || t.isInstanceOf[PUnsignedDoubleArray] => ArrayType(DoubleType, containsNull = true)
+    case t if t.isInstanceOf[PDecimalArray] => ArrayType(DecimalType(None), containsNull = true)
+    case t if t.isInstanceOf[PTimestampArray] || t.isInstanceOf[PUnsignedTimestampArray] => ArrayType(TimestampType, containsNull = true)
+    case t if t.isInstanceOf[PDateArray] || t.isInstanceOf[PUnsignedDateArray] => ArrayType(TimestampType, containsNull = true)
+    case t if t.isInstanceOf[PTimeArray] || t.isInstanceOf[PUnsignedTimeArray] => ArrayType(TimestampType, containsNull = true)
+  }
+}
\ No newline at end of file
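
For a concrete sense of the query buildSql assembles, a small sketch (table and column names are hypothetical, and the Configuration must point at a live HBase/Phoenix cluster):

    import org.apache.hadoop.hbase.HBaseConfiguration

    val hadoopConf = HBaseConfiguration.create()
    val rdd = new PhoenixRDD(sc, "MyTable", Seq("Foo", "Bar"),
      predicate = Some("\"Foo\" > 1"), conf = hadoopConf)

    // rdd.buildSql("MyTable", Seq("Foo", "Bar"), Some("\"Foo\" > 1")) yields:
    //   SELECT "Foo", "Bar" FROM "MyTable" WHERE "Foo" > 1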

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
new file mode 100644
index 0000000..48a70ec
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
@@ -0,0 +1,89 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import java.sql.{PreparedStatement, ResultSet}
+import org.apache.hadoop.mapreduce.lib.db.DBWritable
+import org.apache.phoenix.mapreduce.util.ColumnInfoToStringEncoderDecoder
+import org.apache.phoenix.schema.types.{PDate, PhoenixArray}
+import org.joda.time.DateTime
+import scala.collection.{immutable, mutable}
+import scala.collection.JavaConversions._
+
+class PhoenixRecordWritable(var encodedColumns: String) extends DBWritable {
+  val upsertValues = mutable.ArrayBuffer[Any]()
+  val resultMap = mutable.Map[String, AnyRef]()
+
+  def result : immutable.Map[String, AnyRef] = {
+    resultMap.toMap
+  }
+
+  override def write(statement: PreparedStatement): Unit = {
+    // Decode the ColumnInfo list
+    val columns = ColumnInfoToStringEncoderDecoder.decode(encodedColumns).toList
+
+    // Make sure we at least line up in size
+    if(upsertValues.length != columns.length) {
+      throw new UnsupportedOperationException(
+        s"Upsert values ($upsertValues) do not match the specified columns ($columns)"
+      )
+    }
+
+    // Correlate each value (v) to a column type (c) and an index (i)
+    upsertValues.zip(columns).zipWithIndex.foreach {
+      case ((v, c), i) => {
+        if (v != null) {
+          // Both Java and Joda dates used to work in 4.2.3, but now they must be java.sql.Date
+          val (finalObj, finalType) = v match {
+            case dt: DateTime => (new java.sql.Date(dt.getMillis), PDate.INSTANCE.getSqlType)
+            case d: java.util.Date => (new java.sql.Date(d.getTime), PDate.INSTANCE.getSqlType)
+            case _ => (v, c.getSqlType)
+          }
+          statement.setObject(i + 1, finalObj, finalType)
+        } else {
+          statement.setNull(i + 1, c.getSqlType)
+        }
+      }
+    }
+  }
+
+  override def readFields(resultSet: ResultSet): Unit = {
+    val metadata = resultSet.getMetaData
+    for(i <- 1 to metadata.getColumnCount) {
+
+      // Return the contents of a PhoenixArray, if necessary
+      val value = resultSet.getObject(i) match {
+        case x: PhoenixArray => x.getArray
+        case y => y
+      }
+
+      // Put a (ColumnLabel -> value) entry in the result map
+      resultMap(metadata.getColumnLabel(i)) = value
+    }
+  }
+
+  def add(value: Any): Unit = {
+    upsertValues.append(value)
+  }
+
+  // Empty constructor for MapReduce
+  def this() = {
+    this("")
+  }
+
+  // Encoded columns are a Phoenix-serialized representation of the column metadata
+  def setEncodedColumns(encodedColumns: String) {
+    this.encodedColumns = encodedColumns
+  }
+}
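
A hedged sketch of how a record is assembled before write() binds it to an UPSERT; note the coercion of Joda and java.util dates to java.sql.Date (encodedColumns stands in for a real value produced by ColumnInfoToStringEncoderDecoder.encode):

    val rec = new PhoenixRecordWritable(encodedColumns)
    rec.add(1L)                           // BIGINT column
    rec.add("hello")                      // VARCHAR column
    rec.add(new org.joda.time.DateTime()) // becomes java.sql.Date in write()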

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
new file mode 100644
index 0000000..2926569
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ProductRDDFunctions.scala
@@ -0,0 +1,68 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.io.NullWritable
+import org.apache.phoenix.mapreduce.PhoenixOutputFormat
+import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, PhoenixConfigurationUtil}
+import org.apache.spark.Logging
+import org.apache.spark.rdd.RDD
+
+class ProductRDDFunctions[A <: Product](data: RDD[A]) extends Logging with Serializable {
+
+  def saveToPhoenix(tableName: String, cols: Seq[String],
+                    conf: Configuration = new Configuration, zkUrl: Option[String] = None)
+                    : Unit = {
+
+    // Setup Phoenix output configuration, make a local copy
+    val config = new Configuration(conf)
+    PhoenixConfigurationUtil.setOutputTableName(config, tableName)
+    PhoenixConfigurationUtil.setUpsertColumnNames(config, cols.mkString(","))
+
+    // Override the Zookeeper URL if present. Throw exception if no address given.
+    zkUrl match {
+      case Some(url) => config.set(HConstants.ZOOKEEPER_QUORUM, url )
+      case _ => {
+        if(config.get(HConstants.ZOOKEEPER_QUORUM) == null) {
+          throw new UnsupportedOperationException(
+            s"One of zkUrl or '${HConstants.ZOOKEEPER_QUORUM}' config property must be provided"
+          )
+        }
+      }
+    }
+
+    // Encode the column info to a serializable type
+    val encodedColumns = ColumnInfoToStringEncoderDecoder.encode(
+      PhoenixConfigurationUtil.getUpsertColumnMetadataList(config)
+    )
+
+    // Map each element of the product to a new (NullWritable, PhoenixRecordWritable)
+    val phxRDD: RDD[(NullWritable, PhoenixRecordWritable)] = data.map { e =>
+      val rec = new PhoenixRecordWritable(encodedColumns)
+      e.productIterator.foreach { rec.add(_) }
+      (null, rec)
+    }
+
+    // Save it
+    phxRDD.saveAsNewAPIHadoopFile(
+      "",
+      classOf[NullWritable],
+      classOf[PhoenixRecordWritable],
+      classOf[PhoenixOutputFormat[PhoenixRecordWritable]],
+      config
+    )
+  }
+}
\ No newline at end of file
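
With the implicit conversion from the spark package object in scope, saving a product RDD looks like this (grounded in the OUTPUT_TEST_TABLE test below; the zkUrl value is a placeholder):

    import org.apache.phoenix.spark._

    sc.parallelize(Seq((1L, "1", 1), (2L, "2", 2)))
      .saveToPhoenix("OUTPUT_TEST_TABLE", Seq("ID", "COL1", "COL2"),
        zkUrl = Some("localhost:2181"))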

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkContextFunctions.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkContextFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkContextFunctions.scala
new file mode 100644
index 0000000..a3cd8f0
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkContextFunctions.scala
@@ -0,0 +1,41 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.spark.SparkContext
+import org.apache.spark.rdd.RDD
+
+class SparkContextFunctions(@transient val sc: SparkContext) extends Serializable {
+
+  /*
+    This will return an RDD of Map[String, AnyRef], where the String key corresponds to the column
+    name and the AnyRef value will be a java.sql type as returned by Phoenix
+
+    'table' is the corresponding Phoenix table
+    'columns' is a sequence of columns to query
+    'predicate' is an optional filter expression to go after the WHERE clause, e.g. "TID = 123"
+    'zkUrl' is an optional Zookeeper URL to use to connect to Phoenix
+    'conf' is a Hadoop Configuration object. If zkUrl is not set, the "hbase.zookeeper.quorum"
+      property will be used
+   */
+
+  def phoenixTableAsRDD(table: String, columns: Seq[String], predicate: Option[String] = None,
+                        zkUrl: Option[String] = None, conf: Configuration = new Configuration())
+                        : RDD[Map[String, AnyRef]] = {
+
+    // Create a PhoenixRDD, but only return the serializable 'result' map
+    new PhoenixRDD(sc, table, columns, predicate, zkUrl, conf).map(_.result)
+  }
+}
\ No newline at end of file
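
The matching read path, sketched with a placeholder zkUrl; each element is a Map from column label to the java.sql value Phoenix returned:

    import org.apache.phoenix.spark._

    val rdd = sc.phoenixTableAsRDD("TABLE1", Seq("ID", "COL1"),
      zkUrl = Some("localhost:2181"))
    val firstCol1 = rdd.map(row => row("COL1")).first()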

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkSqlContextFunctions.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkSqlContextFunctions.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkSqlContextFunctions.scala
new file mode 100644
index 0000000..cc3f378
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/SparkSqlContextFunctions.scala
@@ -0,0 +1,39 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.spark.sql.{DataFrame, SQLContext}
+
+class SparkSqlContextFunctions(@transient val sqlContext: SQLContext) extends Serializable {
+
+  /*
+  This will return a Spark DataFrame, with Phoenix types converted to Spark SQL catalyst types
+
+  'table' is the corresponding Phoenix table
+  'columns' is a sequence of columns to query
+  'predicate' is an optional filter expression to go after the WHERE clause, e.g. "TID = 123"
+  'zkUrl' is an optional Zookeeper URL to use to connect to Phoenix
+  'conf' is a Hadoop Configuration object. If zkUrl is not set, the "hbase.zookeeper.quorum"
+    property will be used
+ */
+  def phoenixTableAsDataFrame(table: String, columns: Seq[String],
+                               predicate: Option[String] = None, zkUrl: Option[String] = None,
+                               conf: Configuration = new Configuration): DataFrame = {
+
+    // Create the PhoenixRDD and convert it to a DataFrame
+    new PhoenixRDD(sqlContext.sparkContext, table, columns, predicate, zkUrl, conf)
+      .toDataFrame(sqlContext)
+  }
+}
\ No newline at end of file
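
And the DataFrame variant, which goes through PhoenixRDD.toDataFrame and the catalyst type mapping above (zkUrl again a placeholder):

    import org.apache.phoenix.spark._

    val df = sqlContext.phoenixTableAsDataFrame("TABLE1", Seq("ID", "COL1"),
      zkUrl = Some("localhost:2181"))
    df.registerTempTable("table1")
    sqlContext.sql("SELECT ID FROM table1 WHERE COL1 = 'test_row_1'").count()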

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
new file mode 100644
index 0000000..c19ec16
--- /dev/null
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/package.scala
@@ -0,0 +1,32 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix
+
+import org.apache.spark.SparkContext
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.SQLContext
+
+package object spark {
+  implicit def toProductRDDFunctions[A <: Product](rdd: RDD[A]): ProductRDDFunctions[A] = {
+    new ProductRDDFunctions[A](rdd)
+  }
+
+  implicit def toSparkContextFunctions(sc: SparkContext): SparkContextFunctions = {
+    new SparkContextFunctions(sc)
+  }
+
+  implicit def toSparkSqlContextFunctions(sqlContext: SQLContext): SparkSqlContextFunctions = {
+    new SparkSqlContextFunctions(sqlContext)
+  }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/test/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/test/resources/log4j.xml b/phoenix-spark/src/test/resources/log4j.xml
new file mode 100644
index 0000000..d4799da
--- /dev/null
+++ b/phoenix-spark/src/test/resources/log4j.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+  <appender name="console" class="org.apache.log4j.ConsoleAppender">
+    <param name="Target" value="System.out"/>
+
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%-4r [%t] %-5p %c %x - %m%n"/>
+    </layout>
+  </appender>
+
+  <logger name="org.eclipse">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="org.apache">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="org.apache.phoenix.mapreduce">
+    <level value="FATAL"/>
+  </logger>
+
+  <logger name="org.mortbay">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="BlockStateChange">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="io.netty">
+    <level value="ERROR"/>
+  </logger>
+
+  <root>
+    <priority value="INFO"/>
+    <appender-ref ref="console"/>
+  </root>
+</log4j:configuration>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/test/resources/setup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/test/resources/setup.sql b/phoenix-spark/src/test/resources/setup.sql
new file mode 100644
index 0000000..14a7e7e
--- /dev/null
+++ b/phoenix-spark/src/test/resources/setup.sql
@@ -0,0 +1,18 @@
+CREATE TABLE table1 (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
+CREATE TABLE table2 (id BIGINT NOT NULL PRIMARY KEY, table1_id BIGINT, "t2col1" VARCHAR)
+UPSERT INTO table1 (id, col1) VALUES (1, 'test_row_1')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (1, 1, 'test_child_1')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (2, 1, 'test_child_2')
+UPSERT INTO table1 (id, col1) VALUES (2, 'test_row_2')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (3, 2, 'test_child_1')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (4, 2, 'test_child_2')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (5, 2, 'test_child_3')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (6, 2, 'test_child_4')
+CREATE TABLE "table3" ("id" BIGINT NOT NULL PRIMARY KEY, "col1" VARCHAR)
+UPSERT INTO "table3" ("id", "col1") VALUES (1, 'foo')
+UPSERT INTO "table3" ("id", "col1") VALUES (2, 'bar')
+CREATE TABLE ARRAY_TEST_TABLE (ID BIGINT NOT NULL PRIMARY KEY, VCARRAY VARCHAR[])
+UPSERT INTO ARRAY_TEST_TABLE (ID, VCARRAY) VALUES (1, ARRAY['String1', 'String2', 'String3'])
+CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMESTAMP NOT NULL CONSTRAINT pk PRIMARY KEY (ID, TIMESERIES_KEY))
+UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
+CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala b/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
new file mode 100644
index 0000000..63cb6e4
--- /dev/null
+++ b/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
@@ -0,0 +1,333 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import java.sql.{Connection, DriverManager}
+import java.util.Date
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.{HConstants, HBaseTestingUtility}
+import org.apache.phoenix.schema.ColumnNotFoundException
+import org.apache.phoenix.schema.types.PVarchar
+import org.apache.phoenix.util.ColumnInfo
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.types.{StringType, StructField}
+import org.apache.spark.{SparkConf, SparkContext}
+import org.joda.time.DateTime
+import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
+import org.apache.phoenix.spark._
+
+import scala.collection.mutable.ListBuffer
+
+class PhoenixRDDTest extends FunSuite with Matchers with BeforeAndAfterAll {
+  lazy val hbaseTestingUtility = {
+    new HBaseTestingUtility()
+  }
+
+  lazy val hbaseConfiguration = {
+    val conf = hbaseTestingUtility.getConfiguration
+
+    val quorum = conf.get("hbase.zookeeper.quorum")
+    val clientPort = conf.get("hbase.zookeeper.property.clientPort")
+    val znodeParent = conf.get("zookeeper.znode.parent")
+
+    // Oddly, the Zookeeper quorum entry in this config only reports "localhost", so
+    // rebuild the full connection string from the quorum, client port and znode parent.
+    conf.set(org.apache.hadoop.hbase.HConstants.ZOOKEEPER_QUORUM, s"$quorum:$clientPort:$znodeParent")
+
+    conf
+  }
+
+  lazy val quorumAddress = {
+    hbaseConfiguration.get("hbase.zookeeper.quorum")
+  }
+
+  lazy val zookeeperClientPort = {
+    hbaseConfiguration.get("hbase.zookeeper.property.clientPort")
+  }
+
+  lazy val zookeeperZnodeParent = {
+    hbaseConfiguration.get("zookeeper.znode.parent")
+  }
+
+  lazy val hbaseConnectionString = {
+    s"$quorumAddress:$zookeeperClientPort:$zookeeperZnodeParent"
+  }
+
+  var conn: Connection = _
+
+  override def beforeAll() {
+    hbaseTestingUtility.startMiniCluster()
+
+    conn = DriverManager.getConnection(s"jdbc:phoenix:$hbaseConnectionString")
+
+    conn.setAutoCommit(true)
+
+    // Each SQL statement used to set up Phoenix must be on a single line, even though
+    // that can make for very long lines.
+    val setupSqlSource = getClass.getClassLoader.getResourceAsStream("setup.sql")
+
+    val setupSql = scala.io.Source.fromInputStream(setupSqlSource).getLines()
+
+    for (sql <- setupSql) {
+      val stmt = conn.createStatement()
+
+      stmt.execute(sql)
+
+      stmt.close()
+    }
+
+    conn.commit()
+  }
+
+  override def afterAll() {
+    conn.close()
+    hbaseTestingUtility.shutdownMiniCluster()
+  }
+
+  val conf = new SparkConf().set("spark.ui.showConsoleProgress", "false")
+
+  val sc = new SparkContext("local[1]", "PhoenixSparkTest", conf)
+
+  def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
+    val query = "SELECT %s FROM \"%s\"" format(columns.map(f => "\"" + f + "\"").mkString(", "), table)
+
+    query + (predicate match {
+      case Some(p: String) => " WHERE " + p
+      case _ => ""
+    })
+  }
+
+  test("Can create valid SQL") {
+    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
+      conf = hbaseConfiguration)
+
+    rdd.buildSql("MyTable", Array("Foo", "Bar"), None) should
+      equal("SELECT \"Foo\", \"Bar\" FROM \"MyTable\"")
+  }
+
+  test("Can convert Phoenix schema") {
+    val phoenixSchema = List(
+      new ColumnInfo("varcharColumn", PVarchar.INSTANCE.getSqlType)
+    )
+
+    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
+      conf = hbaseConfiguration)
+
+    val catalystSchema = rdd.phoenixSchemaToCatalystSchema(phoenixSchema)
+
+    val expected = List(StructField("varcharColumn", StringType, nullable = true))
+
+    catalystSchema shouldEqual expected
+  }
+
+  test("Can create schema RDD and execute query") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+
+    df1.registerTempTable("sql_table_1")
+
+    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
+      conf = hbaseConfiguration)
+
+    df2.registerTempTable("sql_table_2")
+
+    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 6L
+  }
+
+  test("Can create schema RDD and execute query on case sensitive table (no config)") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"), zkUrl = Some(hbaseConnectionString))
+
+    df1.registerTempTable("table3")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 2L
+  }
+
+  test("Can create schema RDD and execute constrained query") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+
+    df1.registerTempTable("sql_table_1")
+
+    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
+      predicate = Some("\"ID\" = 1"),
+      conf = hbaseConfiguration)
+
+    df2.registerTempTable("sql_table_2")
+
+    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 1L
+  }
+
+  test("Using a predicate referring to a non-existent column should fail") {
+    intercept[RuntimeException] {
+      val sqlContext = new SQLContext(sc)
+
+      val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+        predicate = Some("foo = bar"),
+        conf = hbaseConfiguration)
+
+      df1.registerTempTable("table3")
+
+      val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+      // we have to execute an action before the predicate failure can occur
+      val count = sqlRdd.count()
+    }.getCause shouldBe a [ColumnNotFoundException]
+  }
+
+  test("Can create schema RDD with predicate that will never match") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+      predicate = Some("\"id\" = -1"),
+      conf = hbaseConfiguration)
+
+    df1.registerTempTable("table3")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 0L
+  }
+
+  test("Can create schema RDD with complex predicate") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("DATE_PREDICATE_TEST_TABLE", Array("ID", "TIMESERIES_KEY"),
+      predicate = Some("ID > 0 AND TIMESERIES_KEY BETWEEN CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)"),
+      conf = hbaseConfiguration)
+    
+    df1.registerTempTable("date_predicate_test_table")
+
+    val sqlRdd = df1.sqlContext.sql("SELECT * FROM date_predicate_test_table")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 0L
+  }
+
+  test("Can query an array table") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("ARRAY_TEST_TABLE", Array("ID", "VCARRAY"),
+      conf = hbaseConfiguration)
+
+    df1.registerTempTable("ARRAY_TEST_TABLE")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM ARRAY_TEST_TABLE")
+
+    val count = sqlRdd.count()
+
+    // get row 0, column 1, which should be "VCARRAY"
+    val arrayValues = sqlRdd.collect().apply(0).apply(1)
+
+    arrayValues should equal(Array("String1", "String2", "String3"))
+
+    count shouldEqual 1L
+  }
+  
+  test("Can read a table as an RDD") {
+    val rdd1 = sc.phoenixTableAsRDD("ARRAY_TEST_TABLE", Seq("ID", "VCARRAY"),
+      conf = hbaseConfiguration)
+
+    val count = rdd1.count()
+
+    val arrayValues = rdd1.take(1)(0)("VCARRAY")
+
+    arrayValues should equal(Array("String1", "String2", "String3"))
+
+    count shouldEqual 1L
+  }
+
+  test("Can save to phoenix table") {
+    val sqlContext = new SQLContext(sc)
+
+    val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
+
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "OUTPUT_TEST_TABLE",
+        Seq("ID","COL1","COL2"),
+        hbaseConfiguration
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT ID, COL1, COL2 FROM OUTPUT_TEST_TABLE")
+    val results = ListBuffer[(Long, String, Int)]()
+    while(rs.next()) {
+      results.append((rs.getLong(1), rs.getString(2), rs.getInt(3)))
+    }
+    stmt.close()
+
+    // Verify they match
+    (0 to results.size - 1).foreach { i =>
+      dataSet(i) shouldEqual results(i)
+    }
+  }
+
+  test("Can save Java and Joda dates to Phoenix (no config)") {
+    val dt = new DateTime()
+    val date = new Date()
+
+    val dataSet = List((1L, "1", 1, dt), (2L, "2", 2, date))
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "OUTPUT_TEST_TABLE",
+        Seq("ID","COL1","COL2","COL3"),
+        zkUrl = Some(hbaseConnectionString)
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT COL3 FROM OUTPUT_TEST_TABLE WHERE ID = 1 OR ID = 2 ORDER BY ID ASC")
+    val results = ListBuffer[java.sql.Date]()
+    while(rs.next()) {
+      results.append(rs.getDate(1))
+    }
+    stmt.close()
+
+    // Verify the epochs are equal
+    results(0).getTime shouldEqual dt.getMillis
+    results(1).getTime shouldEqual date.getTime
+  }
+
+  test("Not specifying a zkUrl or a config quorum URL should fail") {
+    intercept[UnsupportedOperationException] {
+      val sqlContext = new SQLContext(sc)
+      val badConf = new Configuration(hbaseConfiguration)
+      badConf.unset(HConstants.ZOOKEEPER_QUORUM)
+      sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = badConf)
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2d9080d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 861c868..bfafe78 100644
--- a/pom.xml
+++ b/pom.xml
@@ -28,6 +28,7 @@
     <module>phoenix-pig</module>
     <module>phoenix-assembly</module>
     <module>phoenix-pherf</module>
+    <module>phoenix-spark</module>
   </modules>
 
   <repositories>


[23/50] [abbrv] phoenix git commit: PHOENIX-1809 Improve explain plan

Posted by ma...@apache.org.
PHOENIX-1809 Improve explain plan


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c823be99
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c823be99
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c823be99

Branch: refs/heads/calcite
Commit: c823be992460c4211bce713d7e24a125b108c2b8
Parents: 9bbd5ea
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Apr 4 13:20:31 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Apr 5 14:07:51 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/HashJoinIT.java  |  3 -
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |  2 +-
 .../phoenix/end2end/QueryWithLimitIT.java       |  2 +-
 .../phoenix/iterate/BaseResultIterators.java    |  2 +-
 .../apache/phoenix/iterate/ExplainTable.java    | 82 +++++++-------------
 .../iterate/MergeSortTopNResultIterator.java    |  1 -
 .../java/org/apache/phoenix/util/ScanUtil.java  | 19 ++++-
 .../org/apache/phoenix/query/QueryPlanTest.java |  2 -
 8 files changed, 50 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 596e5e9..1a2a1d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -395,7 +395,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     LEFT JOIN joinOrderTable o ON o.item_id = i.item_id LIMIT 4
                  */
                 "CLIENT SERIAL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER FILTER BY PageFilter 4\n" +
                 "    SERVER 4 ROW LIMIT\n" +
                 "CLIENT 4 ROW LIMIT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
@@ -777,7 +776,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     LEFT JOIN joinOrderTable o ON o.item_id = i.item_id LIMIT 4
                  */
                 "CLIENT SERIAL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER FILTER BY PageFilter 4\n" +
                 "    SERVER 4 ROW LIMIT\n" +
                 "CLIENT 4 ROW LIMIT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +
@@ -1179,7 +1177,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
                  *     LEFT JOIN joinOrderTable o ON o.item_id = i.item_id LIMIT 4
                  */
                 "CLIENT SERIAL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" +
-                "    SERVER FILTER BY PageFilter 4\n" +
                 "    SERVER 4 ROW LIMIT\n" +
                 "CLIENT 4 ROW LIMIT\n" +
                 "    PARALLEL LEFT-JOIN TABLE 0\n" +

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
index 7470598..dca57b4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
@@ -180,7 +180,7 @@ public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
         
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER KEYONLY\n" + 
-                "    SERVER FILTER BY FIRST KEY ONLY AND PageFilter 1\n" + 
+                "    SERVER FILTER BY FIRST KEY ONLY\n" + 
                 "    SERVER 1 ROW LIMIT\n" + 
                 "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs));
         conn.close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
index 437bf37..c05c92d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
@@ -74,7 +74,7 @@ public class QueryWithLimitIT extends BaseOwnClusterHBaseManagedTimeIT {
             
             rs = conn.createStatement().executeQuery("EXPLAIN " + query);
             assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER KEYONLY\n" + 
-                    "    SERVER FILTER BY FIRST KEY ONLY AND PageFilter 1\n" + 
+                    "    SERVER FILTER BY FIRST KEY ONLY\n" + 
                     "    SERVER 1 ROW LIMIT\n" + 
                     "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs));
         } finally {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 9ac6a29..8d602b5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -132,7 +132,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
     }
     
     public BaseResultIterators(QueryPlan plan, Integer perScanLimit) throws SQLException {
-        super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint());
+        super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint(), plan.getLimit());
         this.plan = plan;
         StatementContext context = plan.getContext();
         TableRef tableRef = plan.getTableRef();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 2fcc2fb..3fe42fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -26,7 +26,6 @@ import java.util.NoSuchElementException;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.PageFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -36,15 +35,17 @@ import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.filter.BooleanExpressionFilter;
 import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.KeyRange.Bound;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.StringUtil;
 
 import com.google.common.collect.Iterators;
@@ -57,17 +58,19 @@ public abstract class ExplainTable {
     protected final GroupBy groupBy;
     protected final OrderBy orderBy;
     protected final HintNode hint;
+    protected final Integer limit;
    
     public ExplainTable(StatementContext context, TableRef table) {
-        this(context,table,GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, HintNode.EMPTY_HINT_NODE);
+        this(context,table,GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, HintNode.EMPTY_HINT_NODE, null);
     }
 
-    public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy, OrderBy orderBy, HintNode hintNode) {
+    public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy, OrderBy orderBy, HintNode hintNode, Integer limit) {
         this.context = context;
         this.tableRef = table;
         this.groupBy = groupBy;
         this.orderBy = orderBy;
         this.hint = hintNode;
+        this.limit = limit;
     }
 
     private boolean explainSkipScan(StringBuilder buf) {
@@ -98,7 +101,6 @@ public abstract class ExplainTable {
     protected void explain(String prefix, List<String> planSteps) {
         StringBuilder buf = new StringBuilder(prefix);
         ScanRanges scanRanges = context.getScanRanges();
-        boolean hasSkipScanFilter = false;
         Scan scan = context.getScan();
 
         if (scan.getConsistency() != Consistency.STRONG){
@@ -113,7 +115,7 @@ public abstract class ExplainTable {
         if (scanRanges.isEverything()) {
             buf.append("FULL SCAN ");
         } else {
-            hasSkipScanFilter = explainSkipScan(buf);
+            explainSkipScan(buf);
         }
         buf.append("OVER " + tableRef.getTable().getPhysicalName().getString());
         if (!scanRanges.isPointLookup()) {
@@ -121,48 +123,31 @@ public abstract class ExplainTable {
         }
         planSteps.add(buf.toString());
         
-        Filter filter = scan.getFilter();
-        PageFilter pageFilter = null;
-        if (filter != null) {
-            int offset = 0;
-            boolean hasFirstKeyOnlyFilter = false;
-            String filterDesc = "";
-            if (hasSkipScanFilter) {
-                if (filter instanceof FilterList) {
-                    List<Filter> filterList = ((FilterList) filter).getFilters();
-                    if (filterList.get(0) instanceof FirstKeyOnlyFilter) {
-                        hasFirstKeyOnlyFilter = true;
-                        offset = 1;
-                    }
-                    if (filterList.size() > offset+1) {
-                        filterDesc = filterList.get(offset+1).toString();
-                        pageFilter = getPageFilter(filterList);
-                    }
-                }
-            } else if (filter instanceof FilterList) {
-                List<Filter> filterList = ((FilterList) filter).getFilters();
-                if (filterList.get(0) instanceof FirstKeyOnlyFilter) {
-                    hasFirstKeyOnlyFilter = true;
-                    offset = 1;
-                }
-                if (filterList.size() > offset) {
-                    filterDesc = filterList.get(offset).toString();
-                    pageFilter = getPageFilter(filterList);
-                }
-            } else {
+        Iterator<Filter> filterIterator = ScanUtil.getFilterIterator(scan);
+        if (filterIterator.hasNext()) {
+            PageFilter pageFilter = null;
+            FirstKeyOnlyFilter firstKeyOnlyFilter = null;
+            BooleanExpressionFilter whereFilter = null;
+            do {
+                Filter filter = filterIterator.next();
                 if (filter instanceof FirstKeyOnlyFilter) {
-                    hasFirstKeyOnlyFilter = true;
-                } else {
-                    filterDesc = filter.toString();
+                    firstKeyOnlyFilter = (FirstKeyOnlyFilter)filter;
+                } else if (filter instanceof PageFilter) {
+                    pageFilter = (PageFilter)filter;
+                } else if (filter instanceof BooleanExpressionFilter) {
+                    whereFilter = (BooleanExpressionFilter)filter;
                 }
-            }
-            if (filterDesc.length() > 0) {
-                planSteps.add("    SERVER FILTER BY " + (hasFirstKeyOnlyFilter ? "FIRST KEY ONLY AND " : "") + filterDesc);
-            } else if (hasFirstKeyOnlyFilter) {
+            } while (filterIterator.hasNext());
+            if (whereFilter != null) {
+                planSteps.add("    SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + whereFilter.toString());
+            } else if (firstKeyOnlyFilter != null) {
                 planSteps.add("    SERVER FILTER BY FIRST KEY ONLY");
             }
-            if (pageFilter != null) {
-                planSteps.add("    SERVER " + pageFilter.getPageSize() + " ROW LIMIT");
+            if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) { // with GROUP BY, sort happens client-side
+                planSteps.add("    SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S"))
+                        + " SORTED BY " + orderBy.getOrderByExpressions().toString());
+            } else if (pageFilter != null) {
+                planSteps.add("    SERVER " + pageFilter.getPageSize() + " ROW LIMIT");                
             }
         }
         Integer groupByLimit = null;
@@ -173,13 +158,6 @@ public abstract class ExplainTable {
         groupBy.explain(planSteps, groupByLimit);
     }
 
-    private PageFilter getPageFilter(List<Filter> filterList) {
-        for (Filter filter : filterList) {
-            if (filter instanceof PageFilter) return (PageFilter)filter;
-        }
-        return null;
-    }
-
     private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull, int slotIndex) {
         if (Boolean.TRUE.equals(isNull)) {
             buf.append("null");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
index 64ededa..71259e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
@@ -91,7 +91,6 @@ public class MergeSortTopNResultIterator extends MergeSortResultIterator {
     @Override
     public void explain(List<String> planSteps) {
         resultIterators.explain(planSteps);
-        planSteps.add("    SERVER" + (limit == -1 ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderByColumns.toString());
         planSteps.add("CLIENT MERGE SORT");
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 2dfa573..2268866 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -24,6 +24,7 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
@@ -45,13 +46,14 @@ import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.KeyRange.Bound;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
-import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarbinary;
 
+import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 
 /**
@@ -643,4 +645,17 @@ public class ScanUtil {
         }
         return tenantId;
     }
+
+    public static Iterator<Filter> getFilterIterator(Scan scan) {
+        Iterator<Filter> filterIterator;
+        Filter topLevelFilter = scan.getFilter();
+        if (topLevelFilter == null) {
+            filterIterator = Iterators.emptyIterator();
+        } else if (topLevelFilter instanceof FilterList) {
+            filterIterator = ((FilterList) topLevelFilter).getFilters().iterator();
+        } else {
+            filterIterator = Iterators.singletonIterator(topLevelFilter);
+        }
+        return filterIterator;
+    }
 }
\ No newline at end of file
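
A short sketch of how the new ScanUtil.getFilterIterator helper is consumed, mirroring the loop in ExplainTable above (Scala for illustration; scan is assumed to be an HBase Scan that may carry a bare Filter or a FilterList):

    import org.apache.hadoop.hbase.filter.PageFilter
    import org.apache.phoenix.util.ScanUtil

    val filters = ScanUtil.getFilterIterator(scan)
    var pageFilter: PageFilter = null
    while (filters.hasNext) {
      filters.next() match {
        case pf: PageFilter => pageFilter = pf
        case _ => // FirstKeyOnlyFilter, BooleanExpressionFilter, etc.
      }
    }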

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c823be99/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
index 7ad3e25..2f8088d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
@@ -207,13 +207,11 @@ public class QueryPlanTest extends BaseConnectionlessQueryTest {
         String query = "EXPLAIN SELECT * FROM TENANT_VIEW LIMIT 1";
         ResultSet rs = conn.createStatement().executeQuery(query);
         assertEquals("CLIENT SERIAL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + 
-                "    SERVER FILTER BY PageFilter 1\n" + 
                 "    SERVER 1 ROW LIMIT\n" + 
                 "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs));
         query = "EXPLAIN SELECT * FROM TENANT_VIEW LIMIT " + Integer.MAX_VALUE;
         rs = conn.createStatement().executeQuery(query);
         assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + 
-                "    SERVER FILTER BY PageFilter " + Integer.MAX_VALUE + "\n" + 
                 "    SERVER " + Integer.MAX_VALUE + " ROW LIMIT\n" + 
                 "CLIENT " + Integer.MAX_VALUE + " ROW LIMIT", QueryUtil.getExplainPlan(rs));
         query = "EXPLAIN SELECT * FROM TENANT_VIEW WHERE username = 'Joe' LIMIT 1";


[46/50] [abbrv] phoenix git commit: PHOENIX-1869 Set default to not use joni regex library

Posted by ma...@apache.org.
PHOENIX-1869 Set default to not use joni regex library


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/007361b6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/007361b6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/007361b6

Branch: refs/heads/calcite
Commit: 007361b61b511ffa95ebe3bc0d015c148a4078e4
Parents: 2952250
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Apr 15 08:50:23 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Apr 15 08:50:23 2015 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/007361b6/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 5cc4fa7..e15b018 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -193,7 +193,7 @@ public class QueryServicesOptions {
     
     public static final String DEFAULT_CONSISTENCY_LEVEL = Consistency.STRONG.toString();
 
-    public static final boolean DEFAULT_USE_BYTE_BASED_REGEX = true;
+    public static final boolean DEFAULT_USE_BYTE_BASED_REGEX = false;
 
     private final Configuration config;
 


[29/50] [abbrv] phoenix git commit: PHOENIX-1755 Improve error logging if csv line has insufficient fields

Posted by ma...@apache.org.
PHOENIX-1755 Improve error logging if csv line has insufficient fields

Signed-off-by: Gabriel Reid <ga...@ngdata.com>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ff0e8e4e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ff0e8e4e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ff0e8e4e

Branch: refs/heads/calcite
Commit: ff0e8e4edceb5847a2eeb86500f4784525881d6d
Parents: f666baa
Author: Karel Vervaeke <ka...@ngdata.com>
Authored: Thu Mar 19 16:10:07 2015 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Tue Apr 7 21:15:09 2015 +0200

----------------------------------------------------------------------
 .../java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java     | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff0e8e4e/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index b5f6f9f..0e3294b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -144,6 +144,11 @@ public class CsvUpsertExecutor implements Closeable {
      */
     void execute(CSVRecord csvRecord) {
         try {
+            if (csvRecord.size() < conversionFunctions.size()) {
+                String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
+                        csvRecord.size(), conversionFunctions.size());
+                throw new IllegalArgumentException(message);
+            }
             for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
                 Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
                 if (sqlValue != null) {

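The new guard is easy to exercise in isolation with plain commons-csv; a minimal sketch, where expectedColumns stands in for conversionFunctions.size():

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.commons.csv.CSVFormat;
    import org.apache.commons.csv.CSVParser;
    import org.apache.commons.csv.CSVRecord;

    public class ShortCsvRecordCheck {
        public static void main(String[] args) throws IOException {
            int expectedColumns = 3; // stands in for conversionFunctions.size()
            try (CSVParser parser = new CSVParser(new StringReader("a,b"), CSVFormat.DEFAULT)) {
                for (CSVRecord record : parser) {
                    if (record.size() < expectedColumns) {
                        // Same message shape as the check added above.
                        throw new IllegalArgumentException(String.format(
                                "CSV record does not have enough values (has %d, but needs %d)",
                                record.size(), expectedColumns));
                    }
                }
            }
        }
    }

Failing fast here turns the raw ArrayIndexOutOfBoundsException that csvRecord.get(fieldIndex) would otherwise throw into an error message naming the actual and expected field counts.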

[36/50] [abbrv] phoenix git commit: PHOENIX-1792 Add Week() and Hour() built-ins (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1792 Add Week() and Hour() built-ins (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/36a7f248
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/36a7f248
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/36a7f248

Branch: refs/heads/calcite
Commit: 36a7f248bb7beb9234a17eb74f6dfe3edf6e3b64
Parents: 2f0b51c
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Apr 13 16:30:06 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Apr 13 16:30:06 2015 -0700

----------------------------------------------------------------------
 .../end2end/YearMonthSecondFunctionIT.java      | 42 +++++++++++++++++---
 1 file changed, 36 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/36a7f248/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
index 20a88c0..cc51bdd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
@@ -60,7 +60,7 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
     public void testYearFunctionDate() throws SQLException {
 
         assertEquals(2015, callYearFunction("YEAR(current_date())"));
-        
+
         assertEquals(2015, callYearFunction("YEAR(now())"));
 
         assertEquals(2008, callYearFunction("YEAR(TO_DATE('2008-01-01', 'yyyy-MM-dd', 'local'))"));
@@ -113,6 +113,12 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
                 "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
         conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2005-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
+                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2006-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
+                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
+        conn.createStatement().execute(dml);
         conn.commit();
 
         ResultSet rs = conn.createStatement().executeQuery("SELECT k1, YEAR(timestamps), YEAR(times), Year(unsignedDates), YEAR(unsignedTimestamps), " +
@@ -136,6 +142,12 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
                 "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
         conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-04-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
+                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-05-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
+                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
+        conn.createStatement().execute(dml);
         conn.commit();
 
         ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MONTH(timestamps), MONTH(times), MONTH(unsignedDates), MONTH(unsignedTimestamps), " +
@@ -159,6 +171,12 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'), " +
                 "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
         conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:20:30'), TO_TIME('2008-05-16 10:00:30'), " +
+                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:50:30'), TO_TIME('2008-05-16 10:00:30'), " +
+                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
+        conn.createStatement().execute(dml);
         conn.commit();
 
         ResultSet rs = conn.createStatement().executeQuery("SELECT k1, SECOND(dates), SECOND(times), SECOND(unsignedDates), SECOND(unsignedTimestamps), " +
@@ -178,14 +196,18 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         String ddl =
                 "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
         conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-02-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-04-12 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-05-18 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-05-18 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
         conn.createStatement().execute(dml);
         conn.commit();
 
         ResultSet rs = conn.createStatement().executeQuery("SELECT k1, WEEK(dates), WEEK(times) FROM T1 where WEEK(timestamps)=15");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
-        assertEquals(5, rs.getInt(2));
+        assertEquals(2, rs.getInt(2));
         assertEquals(20, rs.getInt(3));
         assertFalse(rs.next());
     }
@@ -198,14 +220,19 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 15:10:20'), " +
                 "TO_TIME('2008-05-16 20:40:30'))";
         conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 10:10:20'), " +
+                "TO_TIME('2008-05-16 20:40:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 08:10:20'), " +
+                "TO_TIME('2008-05-16 20:40:30'))";
+        conn.createStatement().execute(dml);
         conn.commit();
 
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, HOUR(dates), HOUR(timestamps), HOUR(times) FROM T1");
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, HOUR(dates), HOUR(times) FROM T1 where HOUR(timestamps)=15");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals(3, rs.getInt(2));
-        assertEquals(15, rs.getInt(3));
-        assertEquals(20, rs.getInt(4));
+        assertEquals(20, rs.getInt(3));
         assertFalse(rs.next());
     }
 
@@ -220,6 +247,9 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         stmt.setInt(1, 1);
         stmt.setDate(2, new Date(date.getTime()-500));
         stmt.execute();
+        stmt.setInt(1, 2);
+        stmt.setDate(2, new Date(date.getTime()+600000));
+        stmt.execute();
         conn.commit();
 
         ResultSet rs = conn.createStatement().executeQuery("SELECT * from T1 where now() > timestamps");

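With the two built-ins wired up, WEEK() and HOUR() compose with filters the same way as the existing YEAR()/MONTH()/SECOND() functions; a minimal JDBC sketch against the T1 schema used in these tests (the URL is a placeholder):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class WeekHourUsage {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Filter on one date part and project others, as in the tests above.
                ResultSet rs = conn.createStatement().executeQuery(
                        "SELECT k1, WEEK(dates), HOUR(times) FROM T1 WHERE HOUR(timestamps) = 15");
                while (rs.next()) {
                    System.out.println(rs.getInt(1) + " week=" + rs.getInt(2)
                            + " hour=" + rs.getInt(3));
                }
            }
        }
    }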

[43/50] [abbrv] phoenix git commit: PHOENIX-1287 Use the joni byte[] regex engine in place of j.u.regex (Shuxiong Ye)

Posted by ma...@apache.org.
PHOENIX-1287 Use the joni byte[] regex engine in place of j.u.regex (Shuxiong Ye)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3f6b2594
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3f6b2594
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3f6b2594

Branch: refs/heads/calcite
Commit: 3f6b25947d07ea0d7756556dd80e951f12ceda69
Parents: 7ef1718
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Apr 14 12:09:17 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Apr 14 12:09:17 2015 -0700

----------------------------------------------------------------------
 .../src/build/components-major-client.xml       |   2 +
 phoenix-core/pom.xml                            |   5 +
 .../phoenix/end2end/LikeExpressionIT.java       |  88 ++++++++
 .../end2end/RegexpReplaceFunctionIT.java        | 100 +++++++++
 .../phoenix/end2end/RegexpSubstrFunctionIT.java |  43 ++--
 .../phoenix/compile/ExpressionCompiler.java     |  15 +-
 .../expression/ByteBasedLikeExpression.java     |  48 +++++
 .../phoenix/expression/ExpressionType.java      |  16 +-
 .../phoenix/expression/LikeExpression.java      |  64 +++---
 .../expression/StringBasedLikeExpression.java   |  48 +++++
 .../ByteBasedRegexpReplaceFunction.java         |  40 ++++
 .../function/ByteBasedRegexpSplitFunction.java  |  38 ++++
 .../function/ByteBasedRegexpSubstrFunction.java |  38 ++++
 .../function/RegexpReplaceFunction.java         |  38 ++--
 .../function/RegexpSplitFunction.java           |  54 +++--
 .../function/RegexpSubstrFunction.java          |  48 ++---
 .../StringBasedRegexpReplaceFunction.java       |  40 ++++
 .../StringBasedRegexpSplitFunction.java         |  38 ++++
 .../StringBasedRegexpSubstrFunction.java        |  38 ++++
 .../util/regex/AbstractBasePattern.java         |  33 +++
 .../util/regex/AbstractBaseSplitter.java        |  24 +++
 .../expression/util/regex/GuavaSplitter.java    |  54 +++++
 .../expression/util/regex/JONIPattern.java      | 201 +++++++++++++++++++
 .../expression/util/regex/JavaPattern.java      |  93 +++++++++
 .../visitor/CloneExpressionVisitor.java         |   3 +-
 .../phoenix/parse/RegexpReplaceParseNode.java   |  55 +++++
 .../phoenix/parse/RegexpSplitParseNode.java     |  55 +++++
 .../phoenix/parse/RegexpSubstrParseNode.java    |  55 +++++
 .../org/apache/phoenix/query/QueryServices.java |   2 +
 .../phoenix/query/QueryServicesOptions.java     |  14 +-
 .../phoenix/schema/types/PArrayDataType.java    |  91 +++++++++
 .../org/apache/phoenix/util/StringUtil.java     |  68 ++++++-
 .../phoenix/compile/WhereOptimizerTest.java     |  18 +-
 .../phoenix/expression/ILikeExpressionTest.java |  32 ++-
 .../phoenix/expression/LikeExpressionTest.java  |  39 +++-
 .../expression/RegexpReplaceFunctionTest.java   |  81 ++++++++
 .../expression/RegexpSplitFunctionTest.java     |  94 +++++++++
 .../expression/RegexpSubstrFunctionTest.java    |  83 ++++++++
 .../expression/SortOrderExpressionTest.java     |  12 +-
 .../util/regex/PatternPerformanceTest.java      | 144 +++++++++++++
 .../org/apache/phoenix/util/StringUtilTest.java |  32 ++-
 .../java/org/apache/phoenix/util/TestUtil.java  |  28 ++-
 pom.xml                                         |   1 +
 43 files changed, 1952 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-assembly/src/build/components-major-client.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/components-major-client.xml b/phoenix-assembly/src/build/components-major-client.xml
index 768cac0..7a2909b 100644
--- a/phoenix-assembly/src/build/components-major-client.xml
+++ b/phoenix-assembly/src/build/components-major-client.xml
@@ -49,6 +49,8 @@
         <include>org.codehaus.jackson:jackson-core-asl</include>
         <include>commons-collections:commons-collections</include>
         <include>joda-time:joda-time</include>
+        <include>org.jruby.joni:joni</include>
+        <include>org.jruby.jcodings:jcodings</include>
       </includes>
     </dependencySet>
   </dependencySets>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 5e0aff7..45b8d73 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -417,5 +417,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
     </dependency>
+    <dependency>
+        <groupId>org.jruby.joni</groupId>
+        <artifactId>joni</artifactId>
+        <version>${joni.version}</version>
+    </dependency>
   </dependencies>
 </project>
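
What the new dependency buys is regex matching directly over byte[], with no intermediate String. A minimal sketch of the raw joni API — constructor and Matcher signatures as used in the HBase/JRuby ecosystem, worth verifying against whatever ${joni.version} resolves to:

    import java.nio.charset.StandardCharsets;

    import org.jcodings.specific.UTF8Encoding;
    import org.joni.Matcher;
    import org.joni.Option;
    import org.joni.Regex;

    public class JoniSketch {
        public static void main(String[] args) {
            byte[] pattern = "Report[0-9]+".getBytes(StandardCharsets.UTF_8);
            byte[] input = "Report11".getBytes(StandardCharsets.UTF_8);
            Regex regex = new Regex(pattern, 0, pattern.length, Option.DEFAULT,
                    UTF8Encoding.INSTANCE);
            Matcher matcher = regex.matcher(input);
            // match() returns the number of bytes matched from the offset, or -1;
            // matching the full input length is the byte[] analogue of matches().
            boolean fullMatch = matcher.match(0, input.length, Option.DEFAULT) == input.length;
            System.out.println(fullMatch); // true
        }
    }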

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
new file mode 100644
index 0000000..1ee0669
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import org.junit.Before;
+import org.junit.Test;
+
+public class LikeExpressionIT extends BaseHBaseManagedTimeIT {
+    @Before
+    public void doBeforeTestSetup() throws Exception {
+        Connection conn = null;
+        PreparedStatement stmt = null;
+        try {
+            conn = DriverManager.getConnection(getUrl());
+            String ddl;
+            ddl = "CREATE TABLE testTable (k VARCHAR NOT NULL PRIMARY KEY, i INTEGER)";
+            conn.createStatement().execute(ddl);
+            conn.commit();
+        } finally {
+            closeStmtAndConn(stmt, conn);
+        }
+        insertRow(conn, "123n7-app-2-", 1);
+        insertRow(conn, "132n7-App-2-", 2);
+        insertRow(conn, "213n7-app-2-", 4);
+        insertRow(conn, "231n7-App-2-", 8);
+        insertRow(conn, "312n7-app-2-", 16);
+        insertRow(conn, "321n7-App-2-", 32);
+    }
+
+    private void insertRow(Connection conn, String k, int i) throws SQLException {
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testTable VALUES (?, ?)");
+        stmt.setString(1, k);
+        stmt.setInt(2, i);
+        stmt.executeUpdate();
+        conn.commit();
+    }
+
+    private void testLikeExpression(Connection conn, String likeStr, int numResult, int expectedSum)
+            throws Exception {
+        String cmd = "select k, i from testTable where k like '" + likeStr + "'";
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery(cmd);
+        int sum = 0;
+        for (int i = 0; i < numResult; ++i) {
+            assertTrue(rs.next());
+            sum += rs.getInt("i");
+        }
+        assertFalse(rs.next());
+        assertEquals(sum, expectedSum);
+    }
+
+    @Test
+    public void testLikeExpression() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        // wildcard
+        testLikeExpression(conn, "%1%3%7%2%", 3, 7);
+        // CaseSensitive
+        testLikeExpression(conn, "%A%", 3, 42);
+        conn.close();
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpReplaceFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpReplaceFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpReplaceFunctionIT.java
new file mode 100644
index 0000000..dcc20ff
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpReplaceFunctionIT.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.GROUPBYTEST_NAME;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import org.junit.Before;
+import org.junit.Test;
+
+
+public class RegexpReplaceFunctionIT extends BaseHBaseManagedTimeIT {
+
+    private int id;
+
+    @Before
+    public void doBeforeTestSetup() throws Exception {
+        ensureTableCreated(getUrl(), GROUPBYTEST_NAME);
+        Connection conn = DriverManager.getConnection(getUrl());
+        insertRow(conn, "Report11", 10);
+        insertRow(conn, "Report11", 10);
+        insertRow(conn, "Report22", 30);
+        insertRow(conn, "Report33", 30);
+        conn.commit();
+        conn.close();
+    }
+
+    private void insertRow(Connection conn, String uri, int appcpu) throws SQLException {
+        PreparedStatement statement = conn.prepareStatement("UPSERT INTO " + GROUPBYTEST_NAME + "(id, uri, appcpu) values (?,?,?)");
+        statement.setString(1, "id" + id);
+        statement.setString(2, uri);
+        statement.setInt(3, appcpu);
+        statement.executeUpdate();
+        id++;
+    }
+
+    @Test
+    public void testGroupByScanWithRegexpReplace() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("select REGEXP_REPLACE(uri, '[1-3]+', '*') suburi, sum(appcpu) sumcpu from " + GROUPBYTEST_NAME + " group by suburi");
+        assertTrue(rs.next());
+        assertEquals(rs.getString("suburi"), "Report*");
+        assertEquals(rs.getInt("sumcpu"), 80);
+        assertFalse(rs.next());
+
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("select REGEXP_REPLACE(uri, '[1-3]+') suburi, sum(appcpu) sumcpu from " + GROUPBYTEST_NAME + " group by suburi");
+        assertTrue(rs.next());
+        assertEquals(rs.getString("suburi"), "Report");
+        assertEquals(rs.getInt("sumcpu"), 80);
+        assertFalse(rs.next());
+
+        conn.close();
+    }
+
+    @Test
+    public void testFilterWithRegexReplace() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        ResultSet rs = conn.createStatement().executeQuery("select id from " + GROUPBYTEST_NAME + " where REGEXP_REPLACE(uri, '[2-3]+', '*') = 'Report*'");
+        assertTrue(rs.next());
+        assertEquals("id2", rs.getString(1));
+        assertTrue(rs.next());
+        assertEquals("id3", rs.getString(1));
+        assertFalse(rs.next());
+
+        rs = conn.createStatement().executeQuery("select id from " + GROUPBYTEST_NAME + " where REGEXP_REPLACE(uri, '[2-3]+') = 'Report'");
+        assertTrue(rs.next());
+        assertEquals("id2", rs.getString(1));
+        assertTrue(rs.next());
+        assertEquals("id3", rs.getString(1));
+        assertFalse(rs.next());
+        conn.close();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpSubstrFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpSubstrFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpSubstrFunctionIT.java
index ff4b95e..938fd5d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpSubstrFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RegexpSubstrFunctionIT.java
@@ -58,30 +58,37 @@ public class RegexpSubstrFunctionIT extends BaseHBaseManagedTimeIT {
         id++;
     }
 
-    @Test
-    public void testGroupByScanWithRegexpSubstr() throws Exception {
-        Connection conn = DriverManager.getConnection(getUrl());
+    private void testGroupByScanWithRegexpSubstr(Connection conn, Integer offset, String expectedSubstr) throws Exception {
+        String cmd = "select REGEXP_SUBSTR(uri, '[^\\\\?]+'" + ((offset == null) ? "" : ", " + offset.intValue()) + ") suburi, sum(appcpu) sumcpu from " + GROUPBYTEST_NAME + " group by suburi";
         Statement stmt = conn.createStatement();
-        ResultSet rs = stmt.executeQuery("select REGEXP_SUBSTR(uri, '[^\\\\?]+') suburi, sum(appcpu) sumcpu from " + GROUPBYTEST_NAME
-            + " group by suburi");
+        ResultSet rs = stmt.executeQuery(cmd);
         assertTrue(rs.next());
-        assertEquals(rs.getString("suburi"), "Report1");
+        assertEquals(rs.getString("suburi"), exceptedSubstr + "1");
         assertEquals(rs.getInt("sumcpu"), 20);
         assertTrue(rs.next());
-        assertEquals(rs.getString("suburi"), "Report2");
+        assertEquals(rs.getString("suburi"), exceptedSubstr + "2");
         assertEquals(rs.getInt("sumcpu"), 30);
         assertTrue(rs.next());
-        assertEquals(rs.getString("suburi"), "Report3");
+        assertEquals(rs.getString("suburi"), exceptedSubstr + "3");
         assertEquals(rs.getInt("sumcpu"), 30);
         assertFalse(rs.next());
-        conn.close();
     }
 
     @Test
-    public void testFilterWithRegexSubstr() throws Exception {
+    public void testGroupByScanWithRegexpSubstr() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
-        ResultSet rs = conn.createStatement().executeQuery(
-                "select id from " + GROUPBYTEST_NAME + " where REGEXP_SUBSTR(uri, '[^\\\\?]+') = 'Report1'");
+        // Default offset
+        testGroupByScanWithRegexpSubstr(conn, null, "Report");
+        // Positive offset
+        testGroupByScanWithRegexpSubstr(conn, Integer.valueOf(2), "eport");
+        // Negative offset
+        testGroupByScanWithRegexpSubstr(conn, Integer.valueOf(-5), "rt");
+        conn.close();
+    }
+
+    private void testFilterWithRegexSubstr(Connection conn, Integer offset, String expectedSubstr) throws Exception {
+        String cmd = "select id from " + GROUPBYTEST_NAME + " where REGEXP_SUBSTR(uri, '[^\\\\?]+'" + ((offset == null) ? "" : ", " + offset.intValue()) + ") = '" + expectedSubstr + "1'";
+        ResultSet rs = conn.createStatement().executeQuery(cmd);
         assertTrue(rs.next());
         assertEquals("id0", rs.getString(1));
         assertTrue(rs.next());
@@ -89,4 +96,16 @@ public class RegexpSubstrFunctionIT extends BaseHBaseManagedTimeIT {
         assertFalse(rs.next());
     }
 
+    @Test
+    public void testFilterWithRegexSubstr() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        // Default offset
+        testFilterWithRegexSubstr(conn, null, "Report");
+        // Positive offset
+        testFilterWithRegexSubstr(conn, Integer.valueOf(2), "eport");
+        // Negative offset
+        testFilterWithRegexSubstr(conn, Integer.valueOf(-5), "rt");
+        conn.close();
+    }
+
 }
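
Read together, the parameterized tests pin down the offset argument: when omitted, matching starts at the beginning of the source string; a positive value starts the search that many characters in (1-based); a negative value counts back from the end — hence 'Report1', 'eport1', and 'rt1' (semantics inferred from the expected values above). A minimal JDBC sketch of the same three calls, assuming GROUPBYTEST_NAME resolves to a table named GROUPBYTEST:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class RegexpSubstrOffsets {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                String[] queries = {
                        "select REGEXP_SUBSTR(uri, '[^\\\\?]+') suburi from GROUPBYTEST",
                        "select REGEXP_SUBSTR(uri, '[^\\\\?]+', 2) suburi from GROUPBYTEST",
                        "select REGEXP_SUBSTR(uri, '[^\\\\?]+', -5) suburi from GROUPBYTEST" };
                // Expected first rows, per the test: 'Report1', 'eport1', 'rt1'.
                for (String q : queries) {
                    ResultSet rs = conn.createStatement().executeQuery(q);
                    if (rs.next()) {
                        System.out.println(rs.getString(1));
                    }
                }
            }
        }
    }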

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 52c67f1..ce95850 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -32,6 +32,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.ArrayConstructorExpression;
+import org.apache.phoenix.expression.ByteBasedLikeExpression;
 import org.apache.phoenix.expression.CaseExpression;
 import org.apache.phoenix.expression.CoerceExpression;
 import org.apache.phoenix.expression.ComparisonExpression;
@@ -60,6 +61,7 @@ import org.apache.phoenix.expression.NotExpression;
 import org.apache.phoenix.expression.OrExpression;
 import org.apache.phoenix.expression.RowKeyColumnExpression;
 import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.expression.StringBasedLikeExpression;
 import org.apache.phoenix.expression.StringConcatExpression;
 import org.apache.phoenix.expression.TimestampAddExpression;
 import org.apache.phoenix.expression.TimestampSubtractExpression;
@@ -100,6 +102,8 @@ import org.apache.phoenix.parse.StringConcatParseNode;
 import org.apache.phoenix.parse.SubqueryParseNode;
 import org.apache.phoenix.parse.SubtractParseNode;
 import org.apache.phoenix.parse.UnsupportedAllParseNodeVisitor;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.ColumnRef;
@@ -497,7 +501,16 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
                 }
             }
         }
-        Expression expression = LikeExpression.create(children, node.getLikeType());
+        QueryServices services = context.getConnection().getQueryServices();
+        boolean useByteBasedRegex =
+                services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB,
+                    QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX);
+        Expression expression;
+        if (useByteBasedRegex) {
+            expression = ByteBasedLikeExpression.create(children, node.getLikeType());
+        } else {
+            expression = StringBasedLikeExpression.create(children, node.getLikeType());
+        }
         if (ExpressionUtil.isConstant(expression)) {
             ImmutableBytesWritable ptr = context.getTempPtr();
             if (!expression.evaluate(null, ptr)) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java
new file mode 100644
index 0000000..4dd4f70
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
+import org.apache.phoenix.expression.util.regex.JONIPattern;
+import org.apache.phoenix.parse.LikeParseNode.LikeType;
+
+public class ByteBasedLikeExpression extends LikeExpression {
+
+    public ByteBasedLikeExpression() {
+    }
+
+    public ByteBasedLikeExpression(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBasePattern compilePatternSpec(String value) {
+        return new JONIPattern(value);
+    }
+
+    public static LikeExpression create(List<Expression> children, LikeType likeType) {
+        return new ByteBasedLikeExpression(addLikeTypeChild(children, likeType));
+    }
+
+    @Override
+    public LikeExpression clone(List<Expression> children) {
+        return new ByteBasedLikeExpression(children);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 22778ce..5f598b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -25,6 +25,9 @@ import org.apache.phoenix.expression.function.ArrayAppendFunction;
 import org.apache.phoenix.expression.function.ArrayElemRefExpression;
 import org.apache.phoenix.expression.function.ArrayIndexFunction;
 import org.apache.phoenix.expression.function.ArrayLengthFunction;
+import org.apache.phoenix.expression.function.ByteBasedRegexpReplaceFunction;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSplitFunction;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSubstrFunction;
 import org.apache.phoenix.expression.function.CeilDateExpression;
 import org.apache.phoenix.expression.function.CeilDecimalExpression;
 import org.apache.phoenix.expression.function.CeilFunction;
@@ -79,6 +82,9 @@ import org.apache.phoenix.expression.function.SignFunction;
 import org.apache.phoenix.expression.function.SqlTypeNameFunction;
 import org.apache.phoenix.expression.function.StddevPopFunction;
 import org.apache.phoenix.expression.function.StddevSampFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpReplaceFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSplitFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSubstrFunction;
 import org.apache.phoenix.expression.function.SubstrFunction;
 import org.apache.phoenix.expression.function.SumAggregateFunction;
 import org.apache.phoenix.expression.function.TimezoneOffsetFunction;
@@ -137,6 +143,8 @@ public enum ExpressionType {
     MinAggregateFunction(MinAggregateFunction.class),
     MaxAggregateFunction(MaxAggregateFunction.class),
     LikeExpression(LikeExpression.class),
+    ByteBasedLikeExpression(ByteBasedLikeExpression.class),
+    StringBasedLikeExpression(StringBasedLikeExpression.class),
     NotExpression(NotExpression.class),
     CaseExpression(CaseExpression.class),
     InListExpression(InListExpression.class),
@@ -153,8 +161,12 @@ public enum ExpressionType {
     DecimalDivideExpression(DecimalDivideExpression.class),
     CoalesceFunction(CoalesceFunction.class),
     RegexpReplaceFunction(RegexpReplaceFunction.class),
+    ByteBasedRegexpReplaceFunction(ByteBasedRegexpReplaceFunction.class),
+    StringBasedRegexpReplaceFunction(StringBasedRegexpReplaceFunction.class),
     SQLTypeNameFunction(SqlTypeNameFunction.class),
     RegexpSubstrFunction(RegexpSubstrFunction.class),
+    ByteBasedRegexpSubstrFunction(ByteBasedRegexpSubstrFunction.class),
+    StringBasedRegexpSubstrFunction(StringBasedRegexpSubstrFunction.class),
     StringConcatExpression(StringConcatExpression.class),
     LengthFunction(LengthFunction.class),
     LTrimFunction(LTrimFunction.class),
@@ -199,7 +211,9 @@ public enum ExpressionType {
     SQLIndexTypeFunction(SQLIndexTypeFunction.class),
     ModulusExpression(ModulusExpression.class),
     DistinctValueAggregateFunction(DistinctValueAggregateFunction.class),
-    RegexpSplitFunctiond(RegexpSplitFunction.class),
+    RegexpSplitFunction(RegexpSplitFunction.class),
+    ByteBasedRegexpSplitFunction(ByteBasedRegexpSplitFunction.class),
+    StringBasedRegexpSplitFunction(StringBasedRegexpSplitFunction.class),
     RandomFunction(RandomFunction.class),
     ToTimeFunction(ToTimeFunction.class),
     ToTimestampFunction(ToTimestampFunction.class),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
index 730cffb..52ac969 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
@@ -21,11 +21,12 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.List;
-import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.parse.LikeParseNode.LikeType;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
@@ -49,7 +50,7 @@ import com.google.common.collect.Lists;
  * 
  * @since 0.1
  */
-public class LikeExpression extends BaseCompoundExpression {
+public abstract class LikeExpression extends BaseCompoundExpression {
     private static final Logger logger = LoggerFactory.getLogger(LikeExpression.class);
 
     private static final String ZERO_OR_MORE = "\\E.*\\Q";
@@ -195,10 +196,6 @@ public class LikeExpression extends BaseCompoundExpression {
 //        return sb.toString();
 //    }
 
-    public static LikeExpression create(List<Expression> children, LikeType likeType) {
-        return new LikeExpression(addLikeTypeChild(children,likeType));
-    }
-    
     private static final int LIKE_TYPE_INDEX = 2;
     private static final LiteralExpression[] LIKE_TYPE_LITERAL = new LiteralExpression[LikeType.values().length];
     static {
@@ -206,12 +203,12 @@ public class LikeExpression extends BaseCompoundExpression {
             LIKE_TYPE_LITERAL[likeType.ordinal()] = LiteralExpression.newConstant(likeType.name());
         }
     }
-    private Pattern pattern;
+    private AbstractBasePattern pattern;
 
     public LikeExpression() {
     }
 
-    private static List<Expression> addLikeTypeChild(List<Expression> children, LikeType likeType) {
+    protected static List<Expression> addLikeTypeChild(List<Expression> children, LikeType likeType) {
         List<Expression> newChildren = Lists.newArrayListWithExpectedSize(children.size()+1);
         newChildren.addAll(children);
         newChildren.add(LIKE_TYPE_LITERAL[likeType.ordinal()]);
@@ -247,11 +244,14 @@ public class LikeExpression extends BaseCompoundExpression {
         }
     }
 
-    protected Pattern compilePattern (String value) {
-        if (likeType == LikeType.CASE_SENSITIVE)
-            return Pattern.compile(toPattern(value));
-        else
-            return Pattern.compile("(?i)" + toPattern(value));
+    protected abstract AbstractBasePattern compilePatternSpec(String value);
+
+    protected AbstractBasePattern compilePattern(String value) {
+        if (likeType == LikeType.CASE_SENSITIVE) {
+            return compilePatternSpec(toPattern(value));
+        } else {
+            return compilePatternSpec("(?i)" + toPattern(value));
+        }
     }
 
     private Expression getStrExpression() {
@@ -264,36 +264,40 @@ public class LikeExpression extends BaseCompoundExpression {
 
     @Override
     public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
-        Pattern pattern = this.pattern;
+        AbstractBasePattern pattern = this.pattern;
         if (pattern == null) { // TODO: don't allow? this is going to be slooowwww
             if (!getPatternExpression().evaluate(tuple, ptr)) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("LIKE is FALSE: pattern is null");
+                if (logger.isTraceEnabled()) {
+                    logger.trace("LIKE is FALSE: pattern is null");
                 }
                 return false;
             }
             String value = (String) PVarchar.INSTANCE.toObject(ptr, getPatternExpression().getSortOrder());
             pattern = compilePattern(value);
-            if (logger.isDebugEnabled()) {
-                logger.debug("LIKE pattern is expression: " + pattern.pattern());
+            if (logger.isTraceEnabled()) {
+                logger.trace("LIKE pattern is expression: " + pattern.pattern());
             }
         }
 
-        if (!getStrExpression().evaluate(tuple, ptr)) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("LIKE is FALSE: child expression is null");
+        Expression strExpression = getStrExpression();
+        SortOrder strSortOrder = strExpression.getSortOrder();
+        PVarchar strDataType = PVarchar.INSTANCE;
+        if (!strExpression.evaluate(tuple, ptr)) {
+            if (logger.isTraceEnabled()) {
+                logger.trace("LIKE is FALSE: child expression is null");
             }
             return false;
         }
-        if (ptr.getLength() == 0) {
-            return true;
-        }
 
-        String value = (String) PVarchar.INSTANCE.toObject(ptr, getStrExpression().getSortOrder());
-        boolean matched = pattern.matcher(value).matches();
-        ptr.set(matched ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES);
-        if (logger.isDebugEnabled()) {
-            logger.debug("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
+        String value = null;
+        if (logger.isTraceEnabled()) {
+            value = (String) strDataType.toObject(ptr, strSortOrder);
+        }
+        strDataType.coerceBytes(ptr, strDataType, strSortOrder, SortOrder.ASC);
+        pattern.matches(ptr, ptr);
+        if (logger.isTraceEnabled()) {
+            boolean matched = ((Boolean) PBoolean.INSTANCE.toObject(ptr)).booleanValue();
+            logger.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
         }
         return true;
     }
@@ -348,4 +352,6 @@ public class LikeExpression extends BaseCompoundExpression {
     public String toString() {
         return (children.get(0) + " LIKE " + children.get(1));
     }
+
+    abstract public LikeExpression clone(List<Expression> children);
 }
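
The refactoring is a textbook template method: LikeExpression keeps the LIKE-to-regex translation and the evaluation flow, and subclasses contribute only compilePatternSpec() to pick the engine. A minimal sketch of the shape — standalone classes, not the actual Phoenix ones, with the engine modeled as a Predicate<String>:

    import java.util.function.Predicate;
    import java.util.regex.Pattern;

    abstract class LikeSketch {
        // Engine choice lives here and nowhere else.
        protected abstract Predicate<String> compilePatternSpec(String regex);

        Predicate<String> compilePattern(String regex, boolean caseSensitive) {
            // Case handling stays in the base class, as in the diff above.
            return compilePatternSpec(caseSensitive ? regex : "(?i)" + regex);
        }
    }

    class StringBasedLikeSketch extends LikeSketch {
        @Override
        protected Predicate<String> compilePatternSpec(String regex) {
            Pattern p = Pattern.compile(regex); // the j.u.regex engine
            return value -> p.matcher(value).matches();
        }
    }

    public class LikeTemplateDemo {
        public static void main(String[] args) {
            LikeSketch like = new StringBasedLikeSketch();
            System.out.println(like.compilePattern("report.*", false).test("Report11")); // true
        }
    }

A byte-based sibling would return a joni-backed predicate instead, which is exactly how ByteBasedLikeExpression and StringBasedLikeExpression divide the work.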

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java
new file mode 100644
index 0000000..e2afea2
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
+import org.apache.phoenix.expression.util.regex.JavaPattern;
+import org.apache.phoenix.parse.LikeParseNode.LikeType;
+
+public class StringBasedLikeExpression extends LikeExpression {
+
+    public StringBasedLikeExpression() {
+    }
+
+    public StringBasedLikeExpression(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBasePattern compilePatternSpec(String value) {
+        return new JavaPattern(value);
+    }
+
+    public static LikeExpression create(List<Expression> children, LikeType likeType) {
+        return new StringBasedLikeExpression(addLikeTypeChild(children, likeType));
+    }
+
+    @Override
+    public LikeExpression clone(List<Expression> children) {
+        return new StringBasedLikeExpression(children);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java
new file mode 100644
index 0000000..0d6543c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
+import org.apache.phoenix.expression.util.regex.JONIPattern;
+
+public class ByteBasedRegexpReplaceFunction extends RegexpReplaceFunction {
+
+    public ByteBasedRegexpReplaceFunction() {
+    }
+
+    public ByteBasedRegexpReplaceFunction(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBasePattern compilePatternSpec(String value) {
+        return new JONIPattern(value);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java
new file mode 100644
index 0000000..062713e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.util.regex.AbstractBaseSplitter;
+import org.apache.phoenix.expression.util.regex.JONIPattern;
+
+public class ByteBasedRegexpSplitFunction extends RegexpSplitFunction {
+    public ByteBasedRegexpSplitFunction() {
+    }
+
+    public ByteBasedRegexpSplitFunction(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBaseSplitter compilePatternSpec(String value) {
+        return new JONIPattern(value);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java
new file mode 100644
index 0000000..7ee99bf
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
+import org.apache.phoenix.expression.util.regex.JONIPattern;
+
+public class ByteBasedRegexpSubstrFunction extends RegexpSubstrFunction {
+    public ByteBasedRegexpSubstrFunction() {
+    }
+
+    public ByteBasedRegexpSubstrFunction(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBasePattern compilePatternSpec(String value) {
+        return new JONIPattern(value);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
index 3f470a9..f22c978 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java
@@ -20,17 +20,18 @@ package org.apache.phoenix.expression.function;
 import java.io.DataInput;
 import java.io.IOException;
 import java.util.List;
-import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
 import org.apache.phoenix.parse.FunctionParseNode.Argument;
 import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.parse.RegexpReplaceParseNode;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.tuple.Tuple;
 
 
 /**
@@ -48,15 +49,16 @@ import org.apache.phoenix.schema.tuple.Tuple;
  * 
  * @since 0.1
  */
-@BuiltInFunction(name=RegexpReplaceFunction.NAME, args= {
+@BuiltInFunction(name=RegexpReplaceFunction.NAME,
+    nodeClass = RegexpReplaceParseNode.class, args= {
     @Argument(allowedTypes={PVarchar.class}),
     @Argument(allowedTypes={PVarchar.class}),
     @Argument(allowedTypes={PVarchar.class},defaultValue="null")} )
-public class RegexpReplaceFunction extends ScalarFunction {
+public abstract class RegexpReplaceFunction extends ScalarFunction {
     public static final String NAME = "REGEXP_REPLACE";
 
     private boolean hasReplaceStr;
-    private Pattern pattern;
+    private AbstractBasePattern pattern;
     
     public RegexpReplaceFunction() { }
 
@@ -66,11 +68,13 @@ public class RegexpReplaceFunction extends ScalarFunction {
         init();
     }
 
+    protected abstract AbstractBasePattern compilePatternSpec(String value);
+
     private void init() {
         hasReplaceStr = ((LiteralExpression)getReplaceStrExpression()).getValue() != null;
         Object patternString = ((LiteralExpression)children.get(1)).getValue();
         if (patternString != null) {
-            pattern = Pattern.compile((String)patternString);
+            pattern = compilePatternSpec((String) patternString);
         }
     }
 
@@ -84,22 +88,20 @@ public class RegexpReplaceFunction extends ScalarFunction {
         if (!sourceStrExpression.evaluate(tuple, ptr)) {
             return false;
         }
-        String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, sourceStrExpression.getSortOrder());
-        if (sourceStr == null) {
-            return false;
-        }
-        String replaceStr;
+        if (ptr == null) return false;
+        PVarchar type = PVarchar.INSTANCE;
+        type.coerceBytes(ptr, type, sourceStrExpression.getSortOrder(), SortOrder.ASC);
+        ImmutableBytesWritable replacePtr = new ImmutableBytesWritable();
         if (hasReplaceStr) {
-            Expression replaceStrExpression = this.getReplaceStrExpression();
-            if (!replaceStrExpression.evaluate(tuple, ptr)) {
+            Expression replaceStrExpression = getReplaceStrExpression();
+            if (!replaceStrExpression.evaluate(tuple, replacePtr)) {
                 return false;
             }
-            replaceStr = (String) PVarchar.INSTANCE.toObject(ptr, replaceStrExpression.getSortOrder());
+            type.coerceBytes(replacePtr, type, replaceStrExpression.getSortOrder(), SortOrder.ASC);
         } else {
-            replaceStr = "";
+            replacePtr.set(type.toBytes(""));
         }
-        String replacedStr = pattern.matcher(sourceStr).replaceAll(replaceStr);
-        ptr.set(PVarchar.INSTANCE.toBytes(replacedStr));
+        pattern.replaceAll(ptr, replacePtr, ptr);
         return true;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java
index 89c7c9e..b43dec9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java
@@ -24,17 +24,16 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.util.regex.AbstractBaseSplitter;
 import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.parse.RegexpSplitParseNode;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.types.PVarcharArray;
-import org.apache.phoenix.schema.types.PhoenixArray;
-import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ByteUtil;
 
-import com.google.common.base.Splitter;
-import com.google.common.collect.Lists;
-
 /**
  * Function to split a string value into a {@code VARCHAR_ARRAY}.
  * <p>
@@ -46,14 +45,15 @@ import com.google.common.collect.Lists;
  *
  * The function returns a {@link org.apache.phoenix.schema.types.PVarcharArray}
  */
- @FunctionParseNode.BuiltInFunction(name=RegexpSplitFunction.NAME, args= {
+ @FunctionParseNode.BuiltInFunction(name=RegexpSplitFunction.NAME,
+        nodeClass = RegexpSplitParseNode.class, args= {
         @FunctionParseNode.Argument(allowedTypes={PVarchar.class}),
         @FunctionParseNode.Argument(allowedTypes={PVarchar.class})})
-public class RegexpSplitFunction extends ScalarFunction {
+public abstract class RegexpSplitFunction extends ScalarFunction {
 
     public static final String NAME = "REGEXP_SPLIT";
 
-    private Splitter initializedSplitter = null;
+    private AbstractBaseSplitter initializedSplitter = null;
 
     public RegexpSplitFunction() {}
 
@@ -67,11 +67,13 @@ public class RegexpSplitFunction extends ScalarFunction {
         if (patternExpression instanceof LiteralExpression) {
             Object patternValue = ((LiteralExpression) patternExpression).getValue();
             if (patternValue != null) {
-                initializedSplitter = Splitter.onPattern(patternValue.toString());
+                initializedSplitter = compilePatternSpec(patternValue.toString());
             }
         }
     }
 
+    protected abstract AbstractBaseSplitter compilePatternSpec(String value);
+
     @Override
     public void readFields(DataInput input) throws IOException {
         super.readFields(input);
@@ -90,38 +92,28 @@ public class RegexpSplitFunction extends ScalarFunction {
         }
 
         Expression sourceStrExpression = children.get(0);
-        String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, sourceStrExpression.getSortOrder());
-        if (sourceStr == null) { // sourceStr evaluated to null
-            ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
-            return true;
-        }
+        PVarchar type = PVarchar.INSTANCE;
+        type.coerceBytes(ptr, type, sourceStrExpression.getSortOrder(), SortOrder.ASC);
 
-        return split(tuple, ptr, sourceStr);
-    }
-
-    private boolean split(Tuple tuple, ImmutableBytesWritable ptr, String sourceStr) {
-        Splitter splitter = initializedSplitter;
+        AbstractBaseSplitter splitter = initializedSplitter;
         if (splitter == null) {
+            ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable();
             Expression patternExpression = children.get(1);
-            if (!patternExpression.evaluate(tuple, ptr)) {
+            if (!patternExpression.evaluate(tuple, tmpPtr)) {
                 return false;
             }
-            if (ptr.getLength() == 0) {
-                return true; // ptr is already set to null
+            if (tmpPtr.getLength() == 0) {
+                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+                return true; // ptr was set to empty bytes, i.e. SQL NULL
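PLACEHOLDER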
             }
-
-            String patternStr = (String) PVarchar.INSTANCE.toObject(
-                    ptr, patternExpression.getSortOrder());
-            splitter = Splitter.onPattern(patternStr);
+            String patternStr =
+                    (String) PVarchar.INSTANCE.toObject(tmpPtr, patternExpression.getSortOrder());
+            splitter = compilePatternSpec(patternStr);
         }
 
-        List<String> splitStrings = Lists.newArrayList(splitter.split(sourceStr));
-        PhoenixArray splitArray = new PhoenixArray(PVarchar.INSTANCE, splitStrings.toArray());
-        ptr.set(PVarcharArray.INSTANCE.toBytes(splitArray));
-        return true;
+        return splitter.split(ptr, ptr);
     }
 
-
     @Override
     public PDataType getDataType() {
         return PVarcharArray.INSTANCE;

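The Guava-based path this class previously hard-coded survives as one pluggable splitter implementation (GuavaSplitter, later in this commit). A stand-in sketch, using only Guava, of the per-row behavior REGEXP_SPLIT delegates to it:

    import java.util.List;
    import com.google.common.base.Splitter;
    import com.google.common.collect.Lists;

    public class SplitSemanticsSketch {
        public static void main(String[] args) {
            // Splitter.onPattern compiles the separator as a java.util.regex pattern.
            Splitter splitter = Splitter.onPattern(",");
            List<String> parts = Lists.newArrayList(splitter.split("ONE,TWO,THREE"));
            System.out.println(parts); // [ONE, TWO, THREE]
        }
    }
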
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
index 93d8706..430b444 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java
@@ -20,19 +20,19 @@ package org.apache.phoenix.expression.function;
 import java.io.DataInput;
 import java.io.IOException;
 import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
 import org.apache.phoenix.parse.FunctionParseNode.Argument;
 import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
-import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.parse.RegexpSubstrParseNode;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.ByteUtil;
 
 
 /**
@@ -47,17 +47,20 @@ import org.apache.phoenix.util.ByteUtil;
  * 
  * @since 0.1
  */
-@BuiltInFunction(name=RegexpSubstrFunction.NAME, args={
+@BuiltInFunction(name=RegexpSubstrFunction.NAME,
+    nodeClass = RegexpSubstrParseNode.class, args={
     @Argument(allowedTypes={PVarchar.class}),
     @Argument(allowedTypes={PVarchar.class}),
     @Argument(allowedTypes={PLong.class}, defaultValue="1")} )
-public class RegexpSubstrFunction extends PrefixFunction {
+public abstract class RegexpSubstrFunction extends PrefixFunction {
     public static final String NAME = "REGEXP_SUBSTR";
 
-    private Pattern pattern;
+    private AbstractBasePattern pattern;
     private boolean isOffsetConstant;
     private Integer maxLength;
 
+    private static final PDataType TYPE = PVarchar.INSTANCE;
+
     public RegexpSubstrFunction() { }
 
     public RegexpSubstrFunction(List<Expression> children) {
@@ -65,10 +68,12 @@ public class RegexpSubstrFunction extends PrefixFunction {
         init();
     }
 
+    protected abstract AbstractBasePattern compilePatternSpec(String value);
+
     private void init() {
         Object patternString = ((LiteralExpression)children.get(1)).getValue();
         if (patternString != null) {
-            pattern = Pattern.compile((String)patternString);
+            pattern = compilePatternSpec((String) patternString);
         }
         // If the source string has a fixed width, then the max length would be the length 
         // of the source string minus the offset, or the absolute value of the offset if 
@@ -95,13 +100,11 @@ public class RegexpSubstrFunction extends PrefixFunction {
         if (pattern == null) {
             return false;
         }
-        if (!getSourceStrExpression().evaluate(tuple, ptr)) {
-            return false;
-        }
-        String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getSourceStrExpression().getSortOrder());
-        if (sourceStr == null) {
+        ImmutableBytesWritable srcPtr = new ImmutableBytesWritable();
+        if (!getSourceStrExpression().evaluate(tuple, srcPtr)) {
             return false;
         }
+        TYPE.coerceBytes(srcPtr, TYPE, getSourceStrExpression().getSortOrder(), SortOrder.ASC);
 
         Expression offsetExpression = getOffsetExpression();
         if (!offsetExpression.evaluate(tuple, ptr)) {
@@ -109,25 +112,10 @@ public class RegexpSubstrFunction extends PrefixFunction {
         }
         int offset = offsetExpression.getDataType().getCodec().decodeInt(ptr, offsetExpression.getSortOrder());
 
-        int strlen = sourceStr.length();
         // Account for 1 versus 0-based offset
         offset = offset - (offset <= 0 ? 0 : 1);
-        if (offset < 0) { // Offset < 0 means get from end
-            offset = strlen + offset;
-        }
-        if (offset < 0 || offset >= strlen) {
-            return false;
-        }
 
-        Matcher matcher = pattern.matcher(sourceStr);
-        boolean hasSubString = matcher.find(offset);
-        if (!hasSubString) {
-            ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
-            return true;
-        }
-        String subString = matcher.group();
-        ptr.set(PVarchar.INSTANCE.toBytes(subString));
-        return true;
+        return pattern.substr(srcPtr, offset, ptr);
     }
 
     @Override

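The offset arithmetic kept above (1-based offsets, negative offsets counted from the end) now lives partly here and partly in the pattern implementations. A stand-in sketch of the combined semantics (plain java.util.regex, not Phoenix code):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class RegexpSubstrSemanticsSketch {
        static String regexpSubstr(String src, String regex, int offset) {
            offset = offset - (offset <= 0 ? 0 : 1); // 1-based to 0-based
            if (offset < 0) offset += src.length();  // negative offset: from the end
            if (offset < 0 || offset >= src.length()) return null;
            Matcher m = Pattern.compile(regex).matcher(src);
            return m.find(offset) ? m.group() : null; // first match at or after offset
        }

        public static void main(String[] args) {
            System.out.println(regexpSubstr("na1-appsrv35-sj35", "[^-]+", 4)); // appsrv35
        }
    }
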
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java
new file mode 100644
index 0000000..9aaec70
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
+import org.apache.phoenix.expression.util.regex.JavaPattern;
+
+public class StringBasedRegexpReplaceFunction extends RegexpReplaceFunction {
+
+    public StringBasedRegexpReplaceFunction() {
+    }
+
+    public StringBasedRegexpReplaceFunction(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBasePattern compilePatternSpec(String value) {
+        return new JavaPattern(value);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java
new file mode 100644
index 0000000..77321c2
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.util.regex.AbstractBaseSplitter;
+import org.apache.phoenix.expression.util.regex.GuavaSplitter;
+
+public class StringBasedRegexpSplitFunction extends RegexpSplitFunction {
+    public StringBasedRegexpSplitFunction() {
+    }
+
+    public StringBasedRegexpSplitFunction(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBaseSplitter compilePatternSpec(String value) {
+        return new GuavaSplitter(value);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java
new file mode 100644
index 0000000..253db36
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
+import org.apache.phoenix.expression.util.regex.JavaPattern;
+
+public class StringBasedRegexpSubstrFunction extends RegexpSubstrFunction {
+    public StringBasedRegexpSubstrFunction() {
+    }
+
+    public StringBasedRegexpSubstrFunction(List<Expression> children) {
+        super(children);
+    }
+
+    @Override
+    protected AbstractBasePattern compilePatternSpec(String value) {
+        return new JavaPattern(value);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java
new file mode 100644
index 0000000..27b47a0
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.util.regex;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+
+public abstract class AbstractBasePattern {
+
+    public abstract void matches(ImmutableBytesWritable srcPtr, ImmutableBytesWritable outPtr);
+
+    public abstract void replaceAll(ImmutableBytesWritable srcPtr,
+            ImmutableBytesWritable replacePtr, ImmutableBytesWritable outPtr);
+
+    public abstract boolean substr(ImmutableBytesWritable srcPtr, int offsetInStr,
+            ImmutableBytesWritable outPtr);
+
+    public abstract String pattern();
+}

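Every operand on this interface flows through ImmutableBytesWritable pointers, which is what lets the byte-based implementations avoid materializing Strings. A caller-side sketch of the contract, using the JavaPattern implementation added later in this commit:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.expression.util.regex.AbstractBasePattern;
    import org.apache.phoenix.expression.util.regex.JavaPattern;

    public class PatternContractSketch {
        public static void main(String[] args) {
            AbstractBasePattern p = new JavaPattern("ab+c");
            ImmutableBytesWritable src =
                    new ImmutableBytesWritable("xabbcy".getBytes(StandardCharsets.UTF_8));
            ImmutableBytesWritable out = new ImmutableBytesWritable();
            if (p.substr(src, 0, out)) { // 0-based character offset into the source
                System.out.println(new String(out.get(), out.getOffset(),
                        out.getLength(), StandardCharsets.UTF_8)); // prints "abbc"
            }
        }
    }
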
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java
new file mode 100644
index 0000000..323eed0
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.util.regex;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+
+public interface AbstractBaseSplitter {
+    boolean split(ImmutableBytesWritable srcPtr, ImmutableBytesWritable outPtr);
+}

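Splitting is deliberately a separate single-method interface rather than part of AbstractBasePattern: GuavaSplitter only splits, while JONIPattern (below) implements both. A caller-side sketch of the contract:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.expression.util.regex.AbstractBaseSplitter;
    import org.apache.phoenix.expression.util.regex.JONIPattern;

    public class SplitterContractSketch {
        public static void main(String[] args) {
            AbstractBaseSplitter s = new JONIPattern("[0-9]+");
            ImmutableBytesWritable src =
                    new ImmutableBytesWritable("ONE1TWO".getBytes(StandardCharsets.UTF_8));
            ImmutableBytesWritable out = new ImmutableBytesWritable();
            // On success, out points at a serialized VARCHAR ARRAY {"ONE", "TWO"}.
            System.out.println(s.split(src, out)); // prints "true"
        }
    }
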
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java
new file mode 100644
index 0000000..325919e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.util.regex;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.schema.types.PVarcharArray;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.apache.phoenix.util.ByteUtil;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+
+public class GuavaSplitter implements AbstractBaseSplitter {
+    private final Splitter splitter;
+
+    public GuavaSplitter(String patternString) {
+        if (patternString != null) {
+            splitter = Splitter.onPattern(patternString);
+        } else {
+            splitter = null;
+        }
+    }
+
+    @Override
+    public boolean split(ImmutableBytesWritable srcPtr, ImmutableBytesWritable outPtr) {
+        String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr);
+        if (sourceStr == null) { // sourceStr evaluated to null
+            outPtr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+        } else {
+            List<String> splitStrings = Lists.newArrayList(splitter.split(sourceStr));
+            PhoenixArray splitArray = new PhoenixArray(PVarchar.INSTANCE, splitStrings.toArray());
+            outPtr.set(PVarcharArray.INSTANCE.toBytes(splitArray));
+        }
+        return true;
+    }
+}

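One behavior worth noting: Guava's Splitter keeps empty pieces by default, so a separator match at the very start or end of the input yields empty strings, which surface as nulls in the resulting VARCHAR ARRAY. This is the same convention the JONI split below reproduces by hand. A stand-in sketch:

    import java.util.List;
    import com.google.common.base.Splitter;
    import com.google.common.collect.Lists;

    public class SplitEdgeCaseSketch {
        public static void main(String[] args) {
            List<String> parts = Lists.newArrayList(
                    Splitter.onPattern("[0-9]+").split("12ONE34TWO56THREE78"));
            System.out.println(parts); // [, ONE, TWO, THREE, ] -- leading/trailing empties
        }
    }
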
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java
new file mode 100644
index 0000000..5c0b1bc
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.util.regex;
+
+import java.nio.charset.StandardCharsets;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PArrayDataType.PArrayDataTypeBytesArrayBuilder;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.StringUtil;
+import org.jcodings.Encoding;
+import org.jcodings.specific.UTF8Encoding;
+import org.joni.Matcher;
+import org.joni.Option;
+import org.joni.Regex;
+import org.joni.Syntax;
+
+import com.google.common.base.Preconditions;
+
+public class JONIPattern extends AbstractBasePattern implements AbstractBaseSplitter {
+
+    private final Regex pattern;
+    private final String patternString;
+
+    public JONIPattern(String patternString) {
+        this(patternString, 0);
+    }
+
+    public JONIPattern(String patternString, int flags) {
+        this(patternString, flags, UTF8Encoding.INSTANCE);
+    }
+
+    public JONIPattern(String patternString, int flags, Encoding coding) {
+        this.patternString = patternString;
+        if (patternString != null) {
+            // Encode explicitly as UTF-8 to match the UTF8Encoding handed to joni,
+            // rather than relying on the platform default charset.
+            byte[] bytes = patternString.getBytes(StandardCharsets.UTF_8);
+            pattern = new Regex(bytes, 0, bytes.length, flags, coding, Syntax.Java);
+        } else {
+            pattern = null;
+        }
+    }
+
+    @Override
+    public void matches(ImmutableBytesWritable srcPtr, ImmutableBytesWritable outPtr) {
+        Preconditions.checkNotNull(srcPtr);
+        Preconditions.checkNotNull(outPtr);
+        boolean ret = matches(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength());
+        outPtr.set(ret ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES);
+    }
+
+    private boolean matches(byte[] bytes, int offset, int len) {
+        int range = offset + len;
+        Matcher matcher = pattern.matcher(bytes, offset, range);
+        // joni's match() returns the number of bytes matched starting at offset,
+        // or a negative value on failure; a full match consumes exactly len bytes.
+        int ret = matcher.match(offset, range, Option.DEFAULT);
+        return len == ret;
+    }
+
+    @Override
+    public String pattern() {
+        return patternString;
+    }
+
+    @Override
+    public void replaceAll(ImmutableBytesWritable srcPtr, ImmutableBytesWritable replacePtr,
+            ImmutableBytesWritable replacedPtr) {
+        Preconditions.checkNotNull(srcPtr);
+        Preconditions.checkNotNull(replacePtr);
+        Preconditions.checkNotNull(replacedPtr);
+        byte[] replacedBytes =
+                replaceAll(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), replacePtr.get(),
+                    replacePtr.getOffset(), replacePtr.getLength());
+        replacedPtr.set(replacedBytes);
+    }
+
+    private byte[] replaceAll(byte[] srcBytes, int srcOffset, int srcLen, byte[] replaceBytes,
+            int replaceOffset, int replaceLen) {
+        class PairInt {
+            public int begin, end;
+
+            public PairInt(int begin, int end) {
+                this.begin = begin;
+                this.end = end;
+            }
+        }
+        int srcRange = srcOffset + srcLen;
+        Matcher matcher = pattern.matcher(srcBytes, 0, srcRange);
+        int cur = srcOffset;
+        // First pass: record every match and compute the exact output length.
+        List<PairInt> searchResults = new LinkedList<PairInt>();
+        int totalBytesNeeded = 0;
+        while (true) {
+            int nextCur = matcher.search(cur, srcRange, Option.DEFAULT);
+            if (nextCur < 0) {
+                totalBytesNeeded += srcRange - cur;
+                break;
+            }
+            searchResults.add(new PairInt(matcher.getBegin(), matcher.getEnd()));
+            totalBytesNeeded += (nextCur - cur) + replaceLen;
+            cur = matcher.getEnd();
+        }
+        // Second pass: allocate once, then stitch unmatched spans and replacements together.
+        byte[] ret = new byte[totalBytesNeeded];
+        int curPosInSrc = srcOffset, curPosInRet = 0;
+        for (PairInt pair : searchResults) {
+            System.arraycopy(srcBytes, curPosInSrc, ret, curPosInRet, pair.begin - curPosInSrc);
+            curPosInRet += pair.begin - curPosInSrc;
+            System.arraycopy(replaceBytes, replaceOffset, ret, curPosInRet, replaceLen);
+            curPosInRet += replaceLen;
+            curPosInSrc = pair.end;
+        }
+        System.arraycopy(srcBytes, curPosInSrc, ret, curPosInRet, srcRange - curPosInSrc);
+        return ret;
+    }
+
+    @Override
+    public boolean substr(ImmutableBytesWritable srcPtr, int offsetInStr,
+            ImmutableBytesWritable outPtr) {
+        Preconditions.checkNotNull(srcPtr);
+        Preconditions.checkNotNull(outPtr);
+        int offsetInBytes = StringUtil.calculateUTF8Offset(srcPtr.get(), srcPtr.getOffset(),
+            srcPtr.getLength(), SortOrder.ASC, offsetInStr);
+        if (offsetInBytes < 0) return false;
+        substr(srcPtr.get(), offsetInBytes, srcPtr.getOffset() + srcPtr.getLength(), outPtr);
+        return true;
+    }
+
+    private boolean substr(byte[] srcBytes, int offset, int range, ImmutableBytesWritable outPtr) {
+        Matcher matcher = pattern.matcher(srcBytes, 0, range);
+        boolean ret = matcher.search(offset, range, Option.DEFAULT) >= 0;
+        if (ret) {
+            int len = matcher.getEnd() - matcher.getBegin();
+            outPtr.set(srcBytes, matcher.getBegin(), len);
+        } else {
+            outPtr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+        }
+        return ret;
+    }
+
+    @Override
+    public boolean split(ImmutableBytesWritable srcPtr, ImmutableBytesWritable outPtr) {
+        return split(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), outPtr);
+    }
+
+    private boolean split(byte[] srcBytes, int srcOffset, int srcLen,
+            ImmutableBytesWritable outPtr) {
+        PArrayDataTypeBytesArrayBuilder builder =
+                new PArrayDataTypeBytesArrayBuilder(PVarchar.INSTANCE, SortOrder.ASC);
+        int srcRange = srcOffset + srcLen;
+        Matcher matcher = pattern.matcher(srcBytes, 0, srcRange);
+        int cur = srcOffset;
+        boolean append;
+        while (true) {
+            int nextCur = matcher.search(cur, srcRange, Option.DEFAULT);
+            if (nextCur < 0) {
+                append = builder.appendElem(srcBytes, cur, srcRange - cur);
+                if (!append) return false;
+                break;
+            }
+
+            // A separator match at the very start of the source adds a leading null, e.g.
+            // REGEXP_SPLIT("12ONE34TWO56THREE78","[0-9]+")={null, "ONE", "TWO", "THREE", null}
+            if (cur == matcher.getBegin()) {
+                builder.appendElem(srcBytes, cur, 0);
+            }
+
+            if (cur < matcher.getBegin()) {
+                append = builder.appendElem(srcBytes, cur, matcher.getBegin() - cur);
+                if (!append) return false;
+            }
+            cur = matcher.getEnd();
+
+            // A separator match at the very end of the source adds a trailing null, e.g.
+            // REGEXP_SPLIT("12ONE34TWO56THREE78","[0-9]+")={null, "ONE", "TWO", "THREE", null}
+            if (cur == srcRange) {
+                builder.appendElem(srcBytes, cur, 0);
+                break;
+            }
+        }
+        byte[] bytes = builder.getBytesAndClose();
+        if (bytes == null) return false;
+        outPtr.set(bytes);
+        return true;
+    }
+}

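replaceAll above is a two-pass algorithm: the first pass records match boundaries and computes the exact output size, and the second allocates once and copies, so no intermediate Strings or growing buffers are needed. A sketch exercising it directly:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.expression.util.regex.JONIPattern;

    public class JoniReplaceSketch {
        public static void main(String[] args) {
            JONIPattern p = new JONIPattern("[0-9]+");
            ImmutableBytesWritable src =
                    new ImmutableBytesWritable("a1b22c".getBytes(StandardCharsets.UTF_8));
            ImmutableBytesWritable rep =
                    new ImmutableBytesWritable("#".getBytes(StandardCharsets.UTF_8));
            ImmutableBytesWritable out = new ImmutableBytesWritable();
            p.replaceAll(src, rep, out); // "1" and "22" each collapse to "#"
            System.out.println(new String(out.get(), out.getOffset(),
                    out.getLength(), StandardCharsets.UTF_8)); // prints "a#b#c"
        }
    }
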
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java
new file mode 100644
index 0000000..be1188c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.util.regex;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+
+import com.google.common.base.Preconditions;
+
+public class JavaPattern extends AbstractBasePattern {
+
+    private final Pattern pattern;
+
+    public JavaPattern(String patternString) {
+        this(patternString, 0);
+    }
+
+    public JavaPattern(String patternString, int flags) {
+        if (patternString != null) {
+            pattern = Pattern.compile(patternString, flags);
+        } else {
+            pattern = null;
+        }
+    }
+
+    @Override
+    public void matches(ImmutableBytesWritable srcPtr, ImmutableBytesWritable outPtr) {
+        Preconditions.checkNotNull(srcPtr);
+        Preconditions.checkNotNull(outPtr);
+        String matcherSourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr);
+        // A zero-length VARCHAR decodes to null; treat it as the empty string here.
+        if (srcPtr.get().length == 0 && matcherSourceStr == null) matcherSourceStr = "";
+        boolean ret = pattern.matcher(matcherSourceStr).matches();
+        outPtr.set(ret ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES);
+    }
+
+    @Override
+    public String pattern() {
+        return pattern.pattern();
+    }
+
+    @Override
+    public void replaceAll(ImmutableBytesWritable srcPtr, ImmutableBytesWritable replacePtr,
+            ImmutableBytesWritable replacedPtr) {
+        Preconditions.checkNotNull(srcPtr);
+        Preconditions.checkNotNull(replacePtr);
+        Preconditions.checkNotNull(replacedPtr);
+        String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr);
+        String replaceStr = (String) PVarchar.INSTANCE.toObject(replacePtr);
+        if (srcPtr.get().length == 0 && sourceStr == null) sourceStr = "";
+        if (replacePtr.get().length == 0 && replaceStr == null) replaceStr = "";
+        String replacedStr = pattern.matcher(sourceStr).replaceAll(replaceStr);
+        replacedPtr.set(PVarchar.INSTANCE.toBytes(replacedStr));
+    }
+
+    @Override
+    public boolean substr(ImmutableBytesWritable srcPtr, int offsetInStr,
+            ImmutableBytesWritable outPtr) {
+        Preconditions.checkNotNull(srcPtr);
+        Preconditions.checkNotNull(outPtr);
+        String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr);
+        if (srcPtr.get().length == 0 && sourceStr == null) sourceStr = "";
+        if (offsetInStr < 0) offsetInStr += sourceStr.length();
+        if (offsetInStr < 0 || offsetInStr >= sourceStr.length()) return false;
+        Matcher matcher = pattern.matcher(sourceStr);
+        boolean ret = matcher.find(offsetInStr);
+        if (ret) {
+            outPtr.set(PVarchar.INSTANCE.toBytes(matcher.group()));
+        } else {
+            outPtr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+        }
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
index f415b01..e6ede7c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
@@ -107,7 +107,8 @@ public class CloneExpressionVisitor extends TraverseAllExpressionVisitor<Express
 
     @Override
     public Expression visitLeave(LikeExpression node, List<Expression> l) {
-        return Determinism.PER_INVOCATION.compareTo(node.getDeterminism()) > 0 ? node :  new LikeExpression(l);
+        return Determinism.PER_INVOCATION.compareTo(node.getDeterminism()) > 0 ? node : node
+                .clone(l);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java
new file mode 100644
index 0000000..4d98405
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.function.ByteBasedRegexpReplaceFunction;
+import org.apache.phoenix.expression.function.RegexpReplaceFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpReplaceFunction;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+/**
+ * Parse node corresponding to {@link RegexpReplaceFunction}. It also acts as a factory,
+ * creating the appropriate RegexpReplaceFunction implementation according to the
+ * QueryServices.USE_BYTE_BASED_REGEX_ATTRIB setting.
+ */
+public class RegexpReplaceParseNode extends FunctionParseNode {
+
+    RegexpReplaceParseNode(String name, List<ParseNode> children, BuiltInFunctionInfo info) {
+        super(name, children, info);
+    }
+
+    @Override
+    public Expression create(List<Expression> children, StatementContext context)
+            throws SQLException {
+        QueryServices services = context.getConnection().getQueryServices();
+        boolean useByteBasedRegex =
+                services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB,
+                    QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX);
+        if (useByteBasedRegex) {
+            return new ByteBasedRegexpReplaceFunction(children);
+        } else {
+            return new StringBasedRegexpReplaceFunction(children);
+        }
+    }
+}

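How the factory gets steered in practice (assumed usage, not part of this commit): the attribute can be supplied as a connection property, after which REGEXP_* parse nodes resolve to the ByteBased* or StringBased* implementations accordingly. The ZooKeeper quorum and query below are placeholders for illustration:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;
    import org.apache.phoenix.query.QueryServices;

    public class ByteBasedRegexToggleSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, "true");
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // REGEXP_REPLACE now compiles to ByteBasedRegexpReplaceFunction.
                conn.createStatement().executeQuery(
                    "SELECT REGEXP_REPLACE('na1-appsrv35', '[0-9]+', '') FROM SYSTEM.CATALOG LIMIT 1");
            }
        }
    }
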
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f6b2594/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java
new file mode 100644
index 0000000..74bee07
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.parse;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.function.ByteBasedRegexpSplitFunction;
+import org.apache.phoenix.expression.function.RegexpSplitFunction;
+import org.apache.phoenix.expression.function.StringBasedRegexpSplitFunction;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+
+/**
+ * Parse node corresponding to {@link RegexpSplitFunction}. It also acts as a factory,
+ * creating the appropriate RegexpSplitFunction implementation according to the
+ * QueryServices.USE_BYTE_BASED_REGEX_ATTRIB setting.
+ */
+public class RegexpSplitParseNode extends FunctionParseNode {
+
+    RegexpSplitParseNode(String name, List<ParseNode> children, BuiltInFunctionInfo info) {
+        super(name, children, info);
+    }
+
+    @Override
+    public Expression create(List<Expression> children, StatementContext context)
+            throws SQLException {
+        QueryServices services = context.getConnection().getQueryServices();
+        boolean useByteBasedRegex =
+                services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB,
+                    QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX);
+        if (useByteBasedRegex) {
+            return new ByteBasedRegexpSplitFunction(children);
+        } else {
+            return new StringBasedRegexpSplitFunction(children);
+        }
+    }
+}


[50/50] [abbrv] phoenix git commit: Fix compilation errors and assertion failures

Posted by ma...@apache.org.
Fix compilation errors and assertion failures


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9309fff7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9309fff7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9309fff7

Branch: refs/heads/calcite
Commit: 9309fff7ee1dadbfa4e9956662caaa4131561138
Parents: 2368ea6 3fb3bb4
Author: maryannxue <we...@intel.com>
Authored: Thu Apr 16 10:31:13 2015 -0400
Committer: maryannxue <we...@intel.com>
Committed: Thu Apr 16 10:31:13 2015 -0400

----------------------------------------------------------------------
 .gitignore                                      |   5 +
 NOTICE                                          |   5 +
 bin/end2endTest.py                              |   3 +-
 bin/performance.py                              |  13 +-
 bin/psql.py                                     |   3 +-
 phoenix-assembly/pom.xml                        |   8 +-
 phoenix-assembly/src/build/client.xml           |   4 +-
 .../src/build/components-major-client.xml       |   2 +
 .../src/build/components/all-common-files.xml   |  11 +-
 .../src/build/components/all-common-jars.xml    |  11 +
 .../src/build/server-without-antlr.xml          |   2 +
 phoenix-assembly/src/build/server.xml           |   2 +
 phoenix-core/pom.xml                            |  11 +-
 .../wal/ReadWriteKeyValuesWithCodecIT.java      |  14 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  34 +-
 .../apache/phoenix/end2end/AlterSessionIT.java  |  92 +++
 .../phoenix/end2end/ArithmeticQueryIT.java      |  88 +++
 .../phoenix/end2end/ArrayAppendFunctionIT.java  | 667 +++++++++++++++++++
 .../org/apache/phoenix/end2end/ArrayIT.java     |  12 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java  |   1 +
 .../end2end/ClientTimeArithmeticQueryIT.java    |  10 +-
 .../phoenix/end2end/CoalesceFunctionIT.java     |   2 +-
 .../end2end/ConvertTimezoneFunctionIT.java      |  42 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java  | 637 ++++++++++++++++++
 .../apache/phoenix/end2end/DerivedTableIT.java  |   4 +-
 .../phoenix/end2end/EncodeFunctionIT.java       |  12 +-
 .../org/apache/phoenix/end2end/HashJoinIT.java  |  10 -
 .../apache/phoenix/end2end/InstrFunctionIT.java | 126 ++++
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |   2 +-
 .../phoenix/end2end/LikeExpressionIT.java       |  88 +++
 .../org/apache/phoenix/end2end/NotQueryIT.java  |   8 +-
 .../org/apache/phoenix/end2end/OrderByIT.java   | 396 ++++++++++-
 .../phoenix/end2end/PhoenixMetricsIT.java       | 151 +++++
 .../end2end/QueryDatabaseMetaDataIT.java        | 217 +++---
 .../phoenix/end2end/QueryWithLimitIT.java       |   2 +-
 .../end2end/RegexpReplaceFunctionIT.java        | 100 +++
 .../phoenix/end2end/RegexpSubstrFunctionIT.java |  43 +-
 .../RoundFloorCeilFunctionsEnd2EndIT.java       | 114 ++++
 .../phoenix/end2end/RowValueConstructorIT.java  |  33 +-
 .../phoenix/end2end/SignFunctionEnd2EndIT.java  | 141 ++++
 .../phoenix/end2end/StatsCollectorIT.java       |   1 +
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |   7 +
 .../org/apache/phoenix/end2end/SubqueryIT.java  |   8 +-
 .../end2end/SubqueryUsingSortMergeJoinIT.java   |  20 +-
 .../end2end/TimezoneOffsetFunctionIT.java       |  39 +-
 .../phoenix/end2end/ToDateFunctionIT.java       |  57 ++
 .../org/apache/phoenix/end2end/UnionAllIT.java  | 648 ++++++++++++++++++
 .../phoenix/end2end/VariableLengthPKIT.java     |   6 +-
 .../index/GlobalIndexOptimizationIT.java        |  11 +-
 .../index/ImmutableIndexWithStatsIT.java        |  89 +++
 .../end2end/index/IndexExpressionIT.java        | 165 +++--
 .../phoenix/end2end/index/IndexHandlerIT.java   |  12 +-
 .../phoenix/end2end/index/LocalIndexIT.java     |  32 +-
 .../end2end/index/MutableIndexFailureIT.java    |   6 +-
 .../index/balancer/IndexLoadBalancerIT.java     |   6 +-
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   8 +-
 .../apache/phoenix/mapreduce/IndexToolIT.java   | 296 ++++++++
 .../apache/phoenix/rpc/PhoenixClientRpcIT.java  | 113 ++++
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 232 +++++++
 .../TestPhoenixIndexRpcSchedulerFactory.java    |  64 ++
 .../phoenix/trace/PhoenixTraceReaderIT.java     |   2 +-
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |  57 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g       | 155 ++---
 .../hbase/ipc/PhoenixIndexRpcScheduler.java     | 120 ----
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 129 ++++
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java   |  95 +++
 .../controller/ClientRpcControllerFactory.java  |  60 ++
 .../ipc/controller/IndexRpcController.java      |  51 ++
 .../ipc/controller/MetadataRpcController.java   |  55 ++
 .../controller/ServerRpcControllerFactory.java  |  62 ++
 .../regionserver/IndexHalfStoreFileReader.java  |  48 +-
 .../IndexHalfStoreFileReaderGenerator.java      |  14 +-
 .../regionserver/IndexSplitTransaction.java     |  30 +-
 .../hbase/regionserver/KeyValueSkipListSet.java | 183 +++++
 .../hbase/regionserver/LocalIndexMerger.java    |   4 +-
 .../hbase/regionserver/LocalIndexSplitter.java  |  29 +-
 .../apache/phoenix/cache/JodaTimezoneCache.java |  84 +++
 .../calcite/rules/PhoenixClientJoinRule.java    |  33 +-
 .../phoenix/compile/CreateTableCompiler.java    |  37 +
 .../apache/phoenix/compile/DeleteCompiler.java  |   2 +-
 .../phoenix/compile/ExpressionCompiler.java     |  15 +-
 .../apache/phoenix/compile/FromCompiler.java    |   6 +-
 .../apache/phoenix/compile/GroupByCompiler.java |  56 +-
 .../apache/phoenix/compile/JoinCompiler.java    |   4 +-
 .../apache/phoenix/compile/OrderByCompiler.java |  50 +-
 .../phoenix/compile/OrderPreservingTracker.java | 259 +++++++
 .../phoenix/compile/PostIndexDDLCompiler.java   |  38 +-
 .../apache/phoenix/compile/QueryCompiler.java   |  69 +-
 .../phoenix/compile/StatementNormalizer.java    |   2 +-
 .../phoenix/compile/SubqueryRewriter.java       |  19 +-
 .../phoenix/compile/SubselectRewriter.java      |   5 +-
 .../apache/phoenix/compile/TraceQueryPlan.java  |  33 +-
 .../TrackOrderPreservingExpressionCompiler.java | 249 -------
 .../apache/phoenix/compile/UnionCompiler.java   |  88 +++
 .../apache/phoenix/compile/WhereOptimizer.java  |  34 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   6 +-
 .../coprocessor/MetaDataEndpointImpl.java       |   2 +-
 .../phoenix/coprocessor/MetaDataProtocol.java   |   4 +-
 .../coprocessor/SequenceRegionObserver.java     |   2 +-
 .../UngroupedAggregateRegionObserver.java       |   8 +-
 .../exception/DataExceedsCapacityException.java |  40 ++
 .../phoenix/exception/SQLExceptionCode.java     |   9 +-
 .../ValueTypeIncompatibleException.java         |  36 -
 .../apache/phoenix/execute/AggregatePlan.java   |   1 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |   8 +-
 .../apache/phoenix/execute/MutationState.java   |  38 +-
 .../org/apache/phoenix/execute/UnionPlan.java   | 190 ++++++
 .../expression/ByteBasedLikeExpression.java     |  48 ++
 .../expression/DecimalAddExpression.java        |   4 +-
 .../expression/DecimalDivideExpression.java     |   4 +-
 .../expression/DecimalMultiplyExpression.java   |   4 +-
 .../expression/DecimalSubtractExpression.java   |   4 +-
 .../phoenix/expression/ExpressionType.java      |  42 +-
 .../phoenix/expression/LikeExpression.java      |  64 +-
 .../expression/StringBasedLikeExpression.java   |  48 ++
 .../function/ArrayAppendFunction.java           | 127 ++++
 .../ByteBasedRegexpReplaceFunction.java         |  40 ++
 .../function/ByteBasedRegexpSplitFunction.java  |  38 ++
 .../function/ByteBasedRegexpSubstrFunction.java |  38 ++
 .../function/ConvertTimezoneFunction.java       |  38 +-
 .../expression/function/DayOfMonthFunction.java |  83 +++
 .../expression/function/HourFunction.java       |  81 +++
 .../expression/function/InstrFunction.java      | 105 +++
 .../expression/function/MinuteFunction.java     |  81 +++
 .../expression/function/MonthFunction.java      |  83 +++
 .../expression/function/NowFunction.java        |  48 ++
 .../expression/function/RandomFunction.java     |  17 +
 .../function/RegexpReplaceFunction.java         |  38 +-
 .../function/RegexpSplitFunction.java           |  54 +-
 .../function/RegexpSubstrFunction.java          |  48 +-
 .../expression/function/SecondFunction.java     |  81 +++
 .../expression/function/SignFunction.java       |  74 ++
 .../StringBasedRegexpReplaceFunction.java       |  40 ++
 .../StringBasedRegexpSplitFunction.java         |  38 ++
 .../StringBasedRegexpSubstrFunction.java        |  38 ++
 .../function/TimezoneOffsetFunction.java        |  25 +-
 .../expression/function/WeekFunction.java       |  83 +++
 .../expression/function/YearFunction.java       |  82 +++
 .../util/regex/AbstractBasePattern.java         |  33 +
 .../util/regex/AbstractBaseSplitter.java        |  24 +
 .../expression/util/regex/GuavaSplitter.java    |  54 ++
 .../expression/util/regex/JONIPattern.java      | 201 ++++++
 .../expression/util/regex/JavaPattern.java      |  93 +++
 .../visitor/CloneExpressionVisitor.java         |   3 +-
 .../filter/SingleKeyValueComparisonFilter.java  |   4 +-
 .../phoenix/hbase/index/IndexQosCompat.java     |  98 ---
 .../index/IndexQosRpcControllerFactory.java     |  86 ---
 .../org/apache/phoenix/hbase/index/Indexer.java |  10 +-
 .../hbase/index/balancer/IndexLoadBalancer.java |   7 +-
 .../covered/CoveredColumnsIndexBuilder.java     |   2 +-
 .../hbase/index/covered/data/IndexMemStore.java |  27 +-
 .../index/covered/data/LazyValueGetter.java     |   5 +-
 .../index/covered/example/CoveredColumn.java    |   6 +-
 .../example/CoveredColumnIndexCodec.java        |   6 +-
 .../filter/ApplyAndFilterDeletesFilter.java     |   8 +-
 .../index/covered/update/ColumnReference.java   | 131 ++--
 .../ipc/PhoenixIndexRpcSchedulerFactory.java    |  91 ---
 .../index/scanner/FilteredKeyValueScanner.java  |  26 +-
 .../phoenix/hbase/index/scanner/Scanner.java    |   5 +-
 .../hbase/index/scanner/ScannerBuilder.java     |  12 +-
 .../index/table/CoprocessorHTableFactory.java   |  20 -
 .../hbase/index/util/IndexManagementUtil.java   |  40 --
 .../index/util/ReadOnlyImmutableBytesPtr.java   |  59 ++
 .../hbase/index/wal/IndexedKeyValue.java        |  17 -
 .../apache/phoenix/index/IndexMaintainer.java   |  11 +-
 .../phoenix/iterate/BaseResultIterators.java    |  15 +-
 .../apache/phoenix/iterate/ExplainTable.java    |  89 +--
 .../iterate/MergeSortTopNResultIterator.java    |   3 -
 .../phoenix/iterate/ParallelIterators.java      |   7 +-
 .../phoenix/iterate/ScanningResultIterator.java |  24 +-
 .../phoenix/iterate/SpoolingResultIterator.java |   9 +-
 .../phoenix/iterate/UnionResultIterators.java   | 109 +++
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  19 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   3 +-
 .../phoenix/jdbc/PhoenixPreparedStatement.java  |   3 +-
 .../apache/phoenix/jdbc/PhoenixResultSet.java   |  10 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   | 116 +++-
 .../java/org/apache/phoenix/job/JobManager.java | 146 +++-
 .../phoenix/mapreduce/CsvBulkImportUtil.java    |   6 +-
 .../phoenix/mapreduce/CsvBulkLoadTool.java      |  19 +-
 .../phoenix/mapreduce/CsvToKeyValueMapper.java  |  42 +-
 .../phoenix/mapreduce/PhoenixInputFormat.java   |  22 +-
 .../phoenix/mapreduce/PhoenixJobCounters.java   |  29 +
 .../phoenix/mapreduce/PhoenixRecordWriter.java  |   2 +-
 .../phoenix/mapreduce/index/IndexTool.java      | 302 +++++++++
 .../mapreduce/index/PhoenixIndexDBWritable.java |  91 +++
 .../index/PhoenixIndexImportMapper.java         | 133 ++++
 .../phoenix/mapreduce/util/ConnectionUtil.java  |  82 ++-
 .../util/PhoenixConfigurationUtil.java          |  93 ++-
 .../mapreduce/util/PhoenixMapReduceUtil.java    |  22 +-
 .../phoenix/memory/GlobalMemoryManager.java     |   8 +-
 .../org/apache/phoenix/monitoring/Counter.java  |  85 +++
 .../org/apache/phoenix/monitoring/Metric.java   |  64 ++
 .../phoenix/monitoring/PhoenixMetrics.java      | 118 ++++
 .../phoenix/monitoring/SizeStatistic.java       |  78 +++
 .../apache/phoenix/optimize/QueryOptimizer.java |   2 +-
 .../phoenix/parse/AlterSessionStatement.java    |  38 ++
 .../phoenix/parse/CreateIndexStatement.java     |   8 +-
 .../parse/IndexExpressionParseNodeRewriter.java |  30 +-
 .../apache/phoenix/parse/LiteralParseNode.java  |   5 +
 .../apache/phoenix/parse/ParseNodeFactory.java  | 149 ++++-
 .../apache/phoenix/parse/ParseNodeRewriter.java |   5 +-
 .../phoenix/parse/RegexpReplaceParseNode.java   |  55 ++
 .../phoenix/parse/RegexpSplitParseNode.java     |  55 ++
 .../phoenix/parse/RegexpSubstrParseNode.java    |  55 ++
 .../apache/phoenix/parse/SelectStatement.java   |  49 +-
 .../apache/phoenix/parse/TraceStatement.java    |  12 +-
 .../parse/UpdateStatisticsStatement.java        |  11 +-
 .../phoenix/query/BaseQueryServicesImpl.java    |   3 +-
 .../query/ConnectionQueryServicesImpl.java      |   3 +-
 .../org/apache/phoenix/query/HTableFactory.java |   3 +-
 .../apache/phoenix/query/QueryConstants.java    |   2 +
 .../org/apache/phoenix/query/QueryServices.java |  14 +-
 .../phoenix/query/QueryServicesOptions.java     |  41 +-
 .../schema/ConstraintViolationException.java    |  18 +-
 .../phoenix/schema/IllegalDataException.java    |  18 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  31 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |   7 +-
 .../org/apache/phoenix/schema/RowKeySchema.java |  39 +-
 .../schema/stats/StatisticsCollector.java       |  21 +-
 .../phoenix/schema/types/PArrayDataType.java    | 176 +++++
 .../apache/phoenix/schema/types/PBinary.java    |   6 +-
 .../org/apache/phoenix/schema/types/PChar.java  |   8 +-
 .../apache/phoenix/schema/types/PDataType.java  |   3 +-
 .../apache/phoenix/schema/types/PDecimal.java   |  18 +-
 .../apache/phoenix/schema/types/PDouble.java    |  12 +-
 .../org/apache/phoenix/schema/types/PFloat.java |   9 +-
 .../apache/phoenix/schema/types/PInteger.java   |   9 +-
 .../org/apache/phoenix/schema/types/PLong.java  |  13 +-
 .../phoenix/schema/types/PNumericType.java      |  44 ++
 .../phoenix/schema/types/PRealNumber.java       |  39 ++
 .../apache/phoenix/schema/types/PSmallint.java  |   9 +-
 .../apache/phoenix/schema/types/PTinyint.java   |  11 +-
 .../phoenix/schema/types/PUnsignedDouble.java   |   9 +-
 .../phoenix/schema/types/PUnsignedFloat.java    |   5 +-
 .../phoenix/schema/types/PUnsignedInt.java      |   5 +-
 .../phoenix/schema/types/PUnsignedLong.java     |  16 +-
 .../phoenix/schema/types/PUnsignedSmallint.java |   5 +-
 .../schema/types/PUnsignedTimestamp.java        |   2 +-
 .../phoenix/schema/types/PUnsignedTinyint.java  |   5 +-
 .../phoenix/schema/types/PWholeNumber.java      |  35 +
 .../apache/phoenix/trace/TraceMetricSource.java |  15 +-
 .../org/apache/phoenix/trace/TraceReader.java   |   2 +-
 .../apache/phoenix/trace/TracingIterator.java   |   2 +-
 .../org/apache/phoenix/trace/TracingUtils.java  |   2 +-
 .../org/apache/phoenix/trace/util/NullSpan.java |  10 +-
 .../org/apache/phoenix/trace/util/Tracing.java  |  51 +-
 .../org/apache/phoenix/util/ColumnInfo.java     |  20 +-
 .../java/org/apache/phoenix/util/DateUtil.java  |  19 +-
 .../phoenix/util/DefaultEnvironmentEdge.java    |  34 +
 .../apache/phoenix/util/EnvironmentEdge.java    |  34 +
 .../phoenix/util/EnvironmentEdgeManager.java    |  74 ++
 .../java/org/apache/phoenix/util/IndexUtil.java |  28 +-
 .../java/org/apache/phoenix/util/JDBCUtil.java  |  42 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  22 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |  16 +-
 .../java/org/apache/phoenix/util/QueryUtil.java |  46 +-
 .../java/org/apache/phoenix/util/ScanUtil.java  |  19 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |  16 +-
 .../org/apache/phoenix/util/StringUtil.java     |  73 +-
 .../phoenix/util/csv/CsvUpsertExecutor.java     |   5 +
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  20 +-
 .../PhoenixIndexRpcSchedulerFactoryTest.java    | 106 ---
 .../PhoenixRpcSchedulerFactoryTest.java         | 125 ++++
 .../phoenix/cache/JodaTimezoneCacheTest.java    |  53 ++
 .../phoenix/compile/QueryCompilerTest.java      | 179 ++++-
 .../phoenix/compile/QueryMetaDataTest.java      |  11 +
 .../phoenix/compile/QueryOptimizerTest.java     |  13 +-
 .../phoenix/compile/WhereOptimizerTest.java     |  72 +-
 .../expression/ArithmeticOperationTest.java     |  16 +-
 .../expression/ArrayAppendFunctionTest.java     | 345 ++++++++++
 .../phoenix/expression/ILikeExpressionTest.java |  32 +-
 .../phoenix/expression/LikeExpressionTest.java  |  39 +-
 .../expression/RegexpReplaceFunctionTest.java   |  81 +++
 .../expression/RegexpSplitFunctionTest.java     |  94 +++
 .../expression/RegexpSubstrFunctionTest.java    |  83 +++
 .../phoenix/expression/SignFunctionTest.java    | 124 ++++
 .../expression/SortOrderExpressionTest.java     |  12 +-
 .../expression/function/InstrFunctionTest.java  | 108 +++
 .../util/regex/PatternPerformanceTest.java      | 144 ++++
 .../index/covered/TestLocalTableState.java      |   8 +-
 .../index/covered/data/TestIndexMemStore.java   |   5 +-
 .../index/write/TestWALRecoveryCaching.java     |  14 +-
 .../recovery/TestPerRegionIndexWriteCache.java  |  15 +-
 .../apache/phoenix/jdbc/PhoenixTestDriver.java  |   5 +-
 .../mapreduce/CsvBulkImportUtilTest.java        |  14 +-
 .../mapreduce/CsvToKeyValueMapperTest.java      |  26 +-
 .../util/PhoenixConfigurationUtilTest.java      |  60 +-
 .../apache/phoenix/parse/QueryParserTest.java   |  31 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  30 +-
 .../org/apache/phoenix/query/QueryPlanTest.java |   2 -
 .../phoenix/schema/types/PDataTypeTest.java     |   2 +
 .../phoenix/trace/TraceMetricsSourceTest.java   |   4 +-
 .../org/apache/phoenix/util/ColumnInfoTest.java |   8 +-
 .../org/apache/phoenix/util/JDBCUtilTest.java   |  15 +
 .../apache/phoenix/util/MetaDataUtilTest.java   |  18 +-
 .../org/apache/phoenix/util/QueryUtilTest.java  |   2 +-
 .../org/apache/phoenix/util/StringUtilTest.java |  32 +-
 .../java/org/apache/phoenix/util/TestUtil.java  |  28 +-
 phoenix-flume/pom.xml                           |   6 +-
 phoenix-pherf/README.md                         | 105 +++
 phoenix-pherf/cluster/pherf.sh                  |  33 +
 .../config/datamodel/user_defined_schema.sql    |  27 +
 phoenix-pherf/config/env.sh                     |  32 +
 phoenix-pherf/config/pherf.properties           |  31 +
 .../config/scenario/user_defined_scenario.xml   | 134 ++++
 phoenix-pherf/pom.xml                           | 290 ++++++++
 phoenix-pherf/src/main/assembly/cluster.xml     |  52 ++
 phoenix-pherf/src/main/assembly/standalone.xml  |  52 ++
 .../java/org/apache/phoenix/pherf/Pherf.java    | 201 ++++++
 .../apache/phoenix/pherf/PherfConstants.java    |  63 ++
 .../phoenix/pherf/configuration/Column.java     | 210 ++++++
 .../phoenix/pherf/configuration/DataModel.java  |  75 +++
 .../pherf/configuration/DataOverride.java       |  36 +
 .../pherf/configuration/DataSequence.java       |  23 +
 .../pherf/configuration/DataTypeMapping.java    |  46 ++
 .../pherf/configuration/ExecutionType.java      |  23 +
 .../phoenix/pherf/configuration/Query.java      | 136 ++++
 .../phoenix/pherf/configuration/QuerySet.java   | 130 ++++
 .../phoenix/pherf/configuration/Scenario.java   | 163 +++++
 .../pherf/configuration/XMLConfigParser.java    | 157 +++++
 .../pherf/exception/FileLoaderException.java    |  28 +
 .../exception/FileLoaderRuntimeException.java   |  28 +
 .../phoenix/pherf/exception/PherfException.java |  30 +
 .../pherf/exception/PherfRuntimeException.java  |  30 +
 .../phoenix/pherf/jmx/MonitorDetails.java       |  50 ++
 .../phoenix/pherf/jmx/MonitorManager.java       | 173 +++++
 .../java/org/apache/phoenix/pherf/jmx/Stat.java |  32 +
 .../jmx/monitors/CPULoadAverageMonitor.java     |  33 +
 .../pherf/jmx/monitors/ExampleMonitor.java      |  33 +
 .../pherf/jmx/monitors/FreeMemoryMonitor.java   |  30 +
 .../GarbageCollectorElapsedTimeMonitor.java     |  44 ++
 .../pherf/jmx/monitors/HeapMemoryMonitor.java   |  32 +
 .../pherf/jmx/monitors/MaxMemoryMonitor.java    |  30 +
 .../phoenix/pherf/jmx/monitors/Monitor.java     |  30 +
 .../jmx/monitors/NonHeapMemoryMonitor.java      |  32 +
 .../ObjectPendingFinalizationCountMonitor.java  |  33 +
 .../pherf/jmx/monitors/ThreadMonitor.java       |  32 +
 .../pherf/jmx/monitors/TotalMemoryMonitor.java  |  30 +
 .../phoenix/pherf/loaddata/DataLoader.java      | 365 ++++++++++
 .../pherf/result/DataLoadThreadTime.java        |  95 +++
 .../pherf/result/DataLoadTimeSummary.java       |  84 +++
 .../phoenix/pherf/result/DataModelResult.java   |  77 +++
 .../phoenix/pherf/result/QueryResult.java       | 141 ++++
 .../phoenix/pherf/result/QuerySetResult.java    |  47 ++
 .../org/apache/phoenix/pherf/result/Result.java |  54 ++
 .../phoenix/pherf/result/ResultHandler.java     |  37 +
 .../phoenix/pherf/result/ResultManager.java     | 103 +++
 .../apache/phoenix/pherf/result/ResultUtil.java | 271 ++++++++
 .../phoenix/pherf/result/ResultValue.java       |  40 ++
 .../apache/phoenix/pherf/result/RunTime.java    | 114 ++++
 .../phoenix/pherf/result/ScenarioResult.java    |  47 ++
 .../apache/phoenix/pherf/result/ThreadTime.java | 141 ++++
 .../phoenix/pherf/result/file/Extension.java    |  38 ++
 .../phoenix/pherf/result/file/Header.java       |  41 ++
 .../pherf/result/file/ResultFileDetails.java    |  46 ++
 .../pherf/result/impl/CSVResultHandler.java     | 139 ++++
 .../pherf/result/impl/ImageResultHandler.java   | 127 ++++
 .../pherf/result/impl/XMLResultHandler.java     | 103 +++
 .../apache/phoenix/pherf/rules/DataValue.java   |  89 +++
 .../phoenix/pherf/rules/RulesApplier.java       | 377 +++++++++++
 .../phoenix/pherf/schema/SchemaReader.java      |  97 +++
 .../apache/phoenix/pherf/util/PhoenixUtil.java  | 199 ++++++
 .../apache/phoenix/pherf/util/ResourceList.java | 214 ++++++
 .../phoenix/pherf/util/RowCalculator.java       |  78 +++
 .../pherf/workload/MultithreadedDiffer.java     | 113 ++++
 .../pherf/workload/MultithreadedRunner.java     | 170 +++++
 .../phoenix/pherf/workload/QueryExecutor.java   | 246 +++++++
 .../phoenix/pherf/workload/QueryVerifier.java   | 195 ++++++
 .../pherf/workload/WorkloadExecutor.java        | 115 ++++
 .../datamodel/create_prod_test_unsalted.sql     |  33 +
 phoenix-pherf/src/main/resources/hbase-site.xml |  25 +
 .../scenario/prod_test_unsalted_scenario.xml    | 342 ++++++++++
 .../phoenix/pherf/BaseTestWithCluster.java      |  67 ++
 .../org/apache/phoenix/pherf/ColumnTest.java    |  50 ++
 .../phoenix/pherf/ConfigurationParserTest.java  | 200 ++++++
 .../apache/phoenix/pherf/DataIngestTest.java    |  78 +++
 .../apache/phoenix/pherf/DataLoaderTest.java    | 108 +++
 .../org/apache/phoenix/pherf/PherfTest.java     |  67 ++
 .../org/apache/phoenix/pherf/ResourceTest.java  |  68 ++
 .../org/apache/phoenix/pherf/ResultTest.java    | 209 ++++++
 .../apache/phoenix/pherf/RuleGeneratorTest.java | 213 ++++++
 .../apache/phoenix/pherf/SchemaReaderTest.java  |  73 ++
 .../apache/phoenix/pherf/TestHBaseProps.java    |  35 +
 .../test/resources/datamodel/test_schema.sql    |  31 +
 phoenix-pherf/src/test/resources/hbase-site.xml |  25 +
 .../src/test/resources/pherf.test.properties    |  47 ++
 .../test/resources/scenario/test_scenario.xml   | 161 +++++
 phoenix-pherf/standalone/pherf.sh               |  28 +
 phoenix-pig/pom.xml                             |   2 +-
 .../pig/util/QuerySchemaParserFunction.java     |   2 +-
 .../pig/util/SqlQueryToColumnInfoFunction.java  |   2 +-
 .../pig/util/PhoenixPigSchemaUtilTest.java      |   1 +
 phoenix-protocol/src/main/PGuidePosts.proto     |  20 +
 phoenix-spark/README.md                         | 147 ++++
 phoenix-spark/pom.xml                           | 545 +++++++++++++++
 phoenix-spark/src/it/resources/log4j.xml        |  70 ++
 phoenix-spark/src/it/resources/setup.sql        |  35 +
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 417 ++++++++++++
 .../phoenix/spark/ConfigurationUtil.scala       |  65 ++
 .../phoenix/spark/DataFrameFunctions.scala      |  51 ++
 .../apache/phoenix/spark/DefaultSource.scala    |  41 ++
 .../org/apache/phoenix/spark/PhoenixRDD.scala   | 168 +++++
 .../phoenix/spark/PhoenixRecordWritable.scala   |  89 +++
 .../apache/phoenix/spark/PhoenixRelation.scala  |  80 +++
 .../phoenix/spark/ProductRDDFunctions.scala     |  51 ++
 .../phoenix/spark/SparkContextFunctions.scala   |  41 ++
 .../spark/SparkSqlContextFunctions.scala        |  39 ++
 .../org/apache/phoenix/spark/package.scala      |  36 +
 pom.xml                                         |  30 +-
 410 files changed, 23696 insertions(+), 2366 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/main/java/org/apache/phoenix/calcite/rules/PhoenixClientJoinRule.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/calcite/rules/PhoenixClientJoinRule.java
index 99ba81f,0000000..9558209
mode 100644,000000..100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rules/PhoenixClientJoinRule.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rules/PhoenixClientJoinRule.java
@@@ -1,55 -1,0 +1,64 @@@
 +package org.apache.phoenix.calcite.rules;
 +
 +import java.util.Iterator;
 +import java.util.List;
 +
 +import org.apache.calcite.plan.RelOptRule;
 +import org.apache.calcite.plan.RelOptRuleCall;
++import org.apache.calcite.plan.RelTraitSet;
 +import org.apache.calcite.rel.RelCollation;
++import org.apache.calcite.rel.RelCollationTraitDef;
 +import org.apache.calcite.rel.RelCollations;
 +import org.apache.calcite.rel.RelFieldCollation;
 +import org.apache.calcite.rel.RelFieldCollation.Direction;
 +import org.apache.calcite.rel.RelFieldCollation.NullDirection;
 +import org.apache.calcite.rel.RelNode;
 +import org.apache.calcite.rel.core.JoinInfo;
 +import org.apache.phoenix.calcite.rel.PhoenixClientJoin;
 +import org.apache.phoenix.calcite.rel.PhoenixClientSort;
 +import org.apache.phoenix.calcite.rel.PhoenixJoin;
 +import org.apache.phoenix.calcite.rel.PhoenixRel;
++
 +import com.google.common.collect.Lists;
 +
 +public class PhoenixClientJoinRule extends RelOptRule {
 +    
 +    public static PhoenixClientJoinRule INSTANCE = new PhoenixClientJoinRule();
 +
 +    public PhoenixClientJoinRule() {
 +        super(operand(PhoenixJoin.class, any()), "PhoenixClientJoinRule");
 +    }
 +
 +    @Override
 +    public void onMatch(RelOptRuleCall call) {
 +        PhoenixJoin join = call.rel(0);
 +        RelNode left = join.getLeft();
 +        RelNode right = join.getRight();
 +        JoinInfo joinInfo = JoinInfo.of(left, right, join.getCondition());
 +        
-         List<RelFieldCollation> leftFieldCollations = Lists.newArrayList();
-         for (Iterator<Integer> iter = joinInfo.leftKeys.iterator(); iter.hasNext();) {
-             leftFieldCollations.add(new RelFieldCollation(iter.next(), Direction.ASCENDING,NullDirection.FIRST));
-         }
-         RelCollation leftCollation = RelCollations.of(leftFieldCollations);
-         RelNode newLeft = new PhoenixClientSort(left.getCluster(), left.getTraitSet().replace(PhoenixRel.CONVENTION).replace(leftCollation), left, leftCollation, null, null);
-         
-         List<RelFieldCollation> rightFieldCollations = Lists.newArrayList();
-         for (Iterator<Integer> iter = joinInfo.rightKeys.iterator(); iter.hasNext();) {
-             rightFieldCollations.add(new RelFieldCollation(iter.next(), Direction.ASCENDING,NullDirection.FIRST));
++        RelNode newLeft = left;
++        RelNode newRight = right;
++        if (!joinInfo.leftKeys.isEmpty()) {
++            List<RelFieldCollation> leftFieldCollations = Lists.newArrayList();
++            for (Iterator<Integer> iter = joinInfo.leftKeys.iterator(); iter.hasNext();) {
++                leftFieldCollations.add(new RelFieldCollation(iter.next(), Direction.ASCENDING,NullDirection.FIRST));
++            }
++            RelCollation leftCollation = RelCollationTraitDef.INSTANCE.canonize(RelCollations.of(leftFieldCollations));
++            RelTraitSet leftTraitSet = left.getTraitSet().replace(PhoenixRel.CONVENTION).replace(leftCollation);
++            newLeft = new PhoenixClientSort(left.getCluster(), leftTraitSet, left, leftCollation, null, null);
++
++            List<RelFieldCollation> rightFieldCollations = Lists.newArrayList();
++            for (Iterator<Integer> iter = joinInfo.rightKeys.iterator(); iter.hasNext();) {
++                rightFieldCollations.add(new RelFieldCollation(iter.next(), Direction.ASCENDING,NullDirection.FIRST));
++            }
++            RelCollation rightCollation = RelCollationTraitDef.INSTANCE.canonize(RelCollations.of(rightFieldCollations));
++            RelTraitSet rightTraitSet = right.getTraitSet().replace(PhoenixRel.CONVENTION).replace(rightCollation);
++            newRight = new PhoenixClientSort(right.getCluster(), rightTraitSet, right, rightCollation, null, null);
 +        }
-         RelCollation rightCollation = RelCollations.of(rightFieldCollations);
-         RelNode newRight = new PhoenixClientSort(right.getCluster(), right.getTraitSet().replace(PhoenixRel.CONVENTION).replace(rightCollation), right, rightCollation, null, null);
 +
 +        call.transformTo(new PhoenixClientJoin(join.getCluster(),
 +                join.getTraitSet(), newLeft, newRight, join.getCondition(), 
 +                join.getJoinType(), join.getVariablesStopped()));
 +    }
 +
 +}
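
A brief aside on the canonize() calls introduced above: RelTraitDef.canonize
interns a trait so canonical instances can be compared by identity, which is
why the freshly built collations are passed through
RelCollationTraitDef.INSTANCE before being installed in the trait sets. A
minimal compilable sketch of just that step (the single-column collation on
index 0 is illustrative; the Calcite classes are the same ones imported in
the diff above):

    import org.apache.calcite.rel.RelCollation;
    import org.apache.calcite.rel.RelCollationTraitDef;
    import org.apache.calcite.rel.RelCollations;
    import org.apache.calcite.rel.RelFieldCollation;
    import org.apache.calcite.rel.RelFieldCollation.Direction;
    import org.apache.calcite.rel.RelFieldCollation.NullDirection;

    public class CollationCanonizeSketch {
        public static void main(String[] args) {
            RelCollation collation = RelCollations.of(
                new RelFieldCollation(0, Direction.ASCENDING, NullDirection.FIRST));
            // Canonizing returns the interned instance for this collation, so
            // the planner's trait comparisons all see one canonical object.
            RelCollation canonical = RelCollationTraitDef.INSTANCE.canonize(collation);
            System.out.println(canonical);
        }
    }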

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
index 4b4e7a3,44b24af..87b944b
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
@@@ -35,13 -36,6 +36,13 @@@ import org.apache.phoenix.parse.Functio
   * @since 0.1
   */
  public class SelectStatement implements FilterableStatement {
 +    public static final SelectStatement SELECT_STAR =
 +            new SelectStatement(
 +                    null, null, false, 
 +                    Collections.<AliasedNode>singletonList(new AliasedNode(null, WildcardParseNode.INSTANCE)),
 +                    null, Collections.<ParseNode>emptyList(),
 +                    null, Collections.<OrderByNode>emptyList(),
-                     null, 0, false, false);
++                    null, 0, false, false, Collections.<SelectStatement>emptyList());
      public static final SelectStatement SELECT_ONE =
              new SelectStatement(
                      null, null, false, 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9309fff7/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index de50aae,977218d..d89899c
--- a/pom.xml
+++ b/pom.xml
@@@ -102,12 -100,12 +104,13 @@@
      <findbugs.version>1.3.2</findbugs.version>
      <jline.version>2.11</jline.version>
      <snappy.version>0.3</snappy.version>
-     <netty.version>3.6.6.Final</netty.version>
+     <netty.version>4.0.23.Final</netty.version>
      <commons-codec.version>1.7</commons-codec.version>
-     <htrace.version>2.04</htrace.version>
+     <htrace.version>3.1.0-incubating</htrace.version>
      <collections.version>3.2.1</collections.version>
-     <jodatime.version>2.3</jodatime.version>
+     <jodatime.version>2.7</jodatime.version>
+     <joni.version>2.1.2</joni.version>
 +    <calcite.version>1.2.0-incubating</calcite.version>
  
      <!-- Test Dependencies -->
      <mockito-all.version>1.8.5</mockito-all.version>


[38/50] [abbrv] phoenix git commit: PHOENIX-1765 Add DAYOFMONTH built-in function (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1765 Add DAYOFMONTH built-in function (Alicia Ying Shu)
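
For reference, a minimal end-to-end usage sketch of the new built-in over
JDBC. The connection URL is illustrative and assumes a running Phoenix
cluster; the SELECT-from-SYSTEM.CATALOG-with-LIMIT-1 idiom for evaluating a
scalar once is the same one the tests below use:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class DayOfMonthUsage {
        public static void main(String[] args) throws Exception {
            // Illustrative URL; point it at any reachable Phoenix quorum.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT DAYOFMONTH(TO_DATE('2006-12-13')) FROM SYSTEM.CATALOG LIMIT 1");
                rs.next();
                System.out.println(rs.getInt(1)); // prints 13
            }
        }
    }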


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0d78e48b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0d78e48b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0d78e48b

Branch: refs/heads/calcite
Commit: 0d78e48b579739fb85a64fe7258f1838dc1af2c8
Parents: 8975fc1
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Apr 13 17:05:45 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Apr 13 17:05:45 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DateTimeIT.java  | 281 +++++++++++++++++-
 .../end2end/YearMonthSecondFunctionIT.java      | 287 -------------------
 .../phoenix/expression/ExpressionType.java      |   4 +-
 .../expression/function/DayOfMonthFunction.java |  83 ++++++
 4 files changed, 365 insertions(+), 290 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d78e48b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index 371d82e..0db36df 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -42,12 +42,15 @@ import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
 import java.text.Format;
 import java.util.Calendar;
 
 import org.apache.phoenix.util.DateUtil;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 
@@ -59,12 +62,21 @@ public class DateTimeIT extends BaseHBaseManagedTimeIT {
 
     public DateTimeIT() throws Exception {
         super();
-        conn = DriverManager.getConnection(getUrl());
         date = new Date(System.currentTimeMillis());
+    }
+
+    @Before
+    public void setUp() throws SQLException {
+        conn = DriverManager.getConnection(getUrl());
         initAtable();
     }
 
-    protected void initAtable() throws Exception { 
+    @After
+    public void tearDown() throws SQLException {
+        conn.close();
+    }
+    
+    private void initAtable() throws SQLException { 
         ensureTableCreated(getUrl(), ATABLE_NAME, (byte[][])null);
         PreparedStatement stmt = conn.prepareStatement(
             "upsert into " + ATABLE_NAME +
@@ -357,4 +369,269 @@ public class DateTimeIT extends BaseHBaseManagedTimeIT {
             assertEquals(ROW1, rs.getString(1));
             assertFalse(rs.next());
     }
+
+    private static int callYearFunction(Connection conn, String invocation) throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs =
+                stmt.executeQuery(String
+                    .format("SELECT %s FROM SYSTEM.CATALOG LIMIT 1", invocation));
+        assertTrue(rs.next());
+        int returnValue = rs.getInt(1);
+        assertFalse(rs.next());
+        rs.close();
+        stmt.close();
+        return returnValue;
+    }
+
+    private int callYearFunction(String invocation) throws SQLException {
+        return callYearFunction(conn, invocation);
+    }
+
+    @Test
+    public void testYearFunctionDate() throws SQLException {
+
+        assertEquals(2015, callYearFunction("YEAR(current_date())"));
+
+        assertEquals(2015, callYearFunction("YEAR(now())"));
+
+        assertEquals(2008, callYearFunction("YEAR(TO_DATE('2008-01-01', 'yyyy-MM-dd', 'local'))"));
+
+        assertEquals(2004,
+            callYearFunction("YEAR(TO_DATE('2004-12-13 10:13:18', 'yyyy-MM-dd hh:mm:ss'))"));
+
+        assertEquals(2015, callYearFunction("YEAR(TO_DATE('2015-01-27T16:17:57+00:00'))"));
+
+        assertEquals(2005, callYearFunction("YEAR(TO_DATE('2005-12-13 10:13:18'))"));
+
+        assertEquals(2006, callYearFunction("YEAR(TO_DATE('2006-12-13'))"));
+
+        assertEquals(2015, callYearFunction("YEAR(TO_DATE('2015-W05'))"));
+
+        assertEquals(
+            2008,
+            callYearFunction("YEAR(TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'))"));
+    }
+
+    @Test
+    public void testYearFunctionTimestamp() throws SQLException {
+
+        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-01-27T16:17:57+00:00'))"));
+
+        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-01-27T16:17:57Z'))"));
+
+        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-W10-3'))"));
+
+        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-W05'))"));
+
+        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-063'))"));
+
+        assertEquals(2006, callYearFunction("YEAR(TO_TIMESTAMP('2006-12-13'))"));
+
+        assertEquals(2004,
+            callYearFunction("YEAR(TO_TIMESTAMP('2004-12-13 10:13:18', 'yyyy-MM-dd hh:mm:ss'))"));
+
+        assertEquals(
+            2008,
+            callYearFunction("YEAR(TO_TIMESTAMP('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'))"));
+    }
+
+    @Test
+    public void testYearFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
+                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2005-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
+                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2006-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
+                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, YEAR(timestamps), YEAR(times), Year(unsignedDates), YEAR(unsignedTimestamps), " +
+                "YEAR(unsignedTimes) FROM T1 where YEAR(dates) = 2004");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(2006, rs.getInt(2));
+        assertEquals(2008, rs.getInt(3));
+        assertEquals(2010, rs.getInt(4));
+        assertEquals(2012, rs.getInt(5));
+        assertEquals(2015, rs.getInt(6));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testMonthFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
+                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-04-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
+                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-05-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
+                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MONTH(timestamps), MONTH(times), MONTH(unsignedDates), MONTH(unsignedTimestamps), " +
+                "MONTH(unsignedTimes) FROM T1 where MONTH(dates) = 3");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(4, rs.getInt(2));
+        assertEquals(5, rs.getInt(3));
+        assertEquals(6, rs.getInt(4));
+        assertEquals(7, rs.getInt(5));
+        assertEquals(12, rs.getInt(6));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testSecondFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'), " +
+                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:20:30'), TO_TIME('2008-05-16 10:00:30'), " +
+                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:50:30'), TO_TIME('2008-05-16 10:00:30'), " +
+                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, SECOND(dates), SECOND(times), SECOND(unsignedDates), SECOND(unsignedTimestamps), " +
+                "SECOND(unsignedTimes) FROM T1 where SECOND(timestamps)=20");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(10, rs.getInt(2));
+        assertEquals(30, rs.getInt(3));
+        assertEquals(40, rs.getInt(4));
+        assertEquals(0, rs.getInt(5));
+        assertEquals(50, rs.getInt(6));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testWeekFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-04-12 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-05-18 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-05-18 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, WEEK(dates), WEEK(times) FROM T1 where WEEK(timestamps)=15");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(2, rs.getInt(2));
+        assertEquals(20, rs.getInt(3));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testHourFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 15:10:20'), " +
+                "TO_TIME('2008-05-16 20:40:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 10:10:20'), " +
+                "TO_TIME('2008-05-16 20:40:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 08:10:20'), " +
+                "TO_TIME('2008-05-16 20:40:30'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, HOUR(dates), HOUR(times) FROM T1 where HOUR(timestamps)=15");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(3, rs.getInt(2));
+        assertEquals(20, rs.getInt(3));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testNowFunction() throws Exception {
+        Date date = new Date(System.currentTimeMillis());
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, timestamps TIMESTAMP CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (?, ?)";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.setInt(1, 1);
+        stmt.setDate(2, new Date(date.getTime()-500));
+        stmt.execute();
+        stmt.setInt(1, 2);
+        stmt.setDate(2, new Date(date.getTime()+600000));
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT * from T1 where now() > timestamps");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(new Date(date.getTime()-500), rs.getDate(2));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testMinuteFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:10:10'), TO_TIMESTAMP('2006-04-12 00:20:20'), TO_TIME('2008-05-16 10:30:30'), " +
+                "TO_DATE('2010-06-20 00:40:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:50:50'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:10:10'), TO_TIMESTAMP('2006-04-12 00:50:20'), TO_TIME('2008-05-16 10:30:30'), " +
+                "TO_DATE('2010-06-20 00:40:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:50:50'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MINUTE(dates), MINUTE(times), MINUTE(unsignedDates), MINUTE(unsignedTimestamps), " +
+                "MINUTE(unsignedTimes) FROM T1 where MINUTE(timestamps)=20");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(10, rs.getInt(2));
+        assertEquals(30, rs.getInt(3));
+        assertEquals(40, rs.getInt(4));
+        assertEquals(0, rs.getInt(5));
+        assertEquals(50, rs.getInt(6));
+        assertFalse(rs.next());
+    }
+    
+    @Test
+    public void testDayOfMonthFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-01-08 10:00:10'), TO_TIMESTAMP('2006-04-12 08:00:20'), TO_TIME('2008-05-26 11:00:30'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-01-18 10:00:10'), TO_TIMESTAMP('2006-05-22 08:00:20'), TO_TIME('2008-12-30 11:00:30'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, DAYOFMONTH(dates), DAYOFMONTH(times) FROM T1 where DAYOFMONTH(timestamps)=12");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(8, rs.getInt(2));
+        assertEquals(26, rs.getInt(3));
+        assertFalse(rs.next());
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d78e48b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
deleted file mode 100644
index 1206ee4..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.Date;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
-    private Connection conn;
-
-    @Before
-    public void setUp() throws SQLException {
-        conn = DriverManager.getConnection(getUrl());
-    }
-
-    @After
-    public void tearDown() throws SQLException {
-        conn.close();
-    }
-
-    private static int callYearFunction(Connection conn, String invocation) throws SQLException {
-        Statement stmt = conn.createStatement();
-        ResultSet rs =
-                stmt.executeQuery(String
-                    .format("SELECT %s FROM SYSTEM.CATALOG LIMIT 1", invocation));
-        assertTrue(rs.next());
-        int returnValue = rs.getInt(1);
-        assertFalse(rs.next());
-        rs.close();
-        stmt.close();
-        return returnValue;
-    }
-
-    private int callYearFunction(String invocation) throws SQLException {
-        return callYearFunction(conn, invocation);
-    }
-
-    @Test
-    public void testYearFunctionDate() throws SQLException {
-
-        assertEquals(2015, callYearFunction("YEAR(current_date())"));
-
-        assertEquals(2015, callYearFunction("YEAR(now())"));
-
-        assertEquals(2008, callYearFunction("YEAR(TO_DATE('2008-01-01', 'yyyy-MM-dd', 'local'))"));
-
-        assertEquals(2004,
-            callYearFunction("YEAR(TO_DATE('2004-12-13 10:13:18', 'yyyy-MM-dd hh:mm:ss'))"));
-
-        assertEquals(2015, callYearFunction("YEAR(TO_DATE('2015-01-27T16:17:57+00:00'))"));
-
-        assertEquals(2005, callYearFunction("YEAR(TO_DATE('2005-12-13 10:13:18'))"));
-
-        assertEquals(2006, callYearFunction("YEAR(TO_DATE('2006-12-13'))"));
-
-        assertEquals(2015, callYearFunction("YEAR(TO_DATE('2015-W05'))"));
-
-        assertEquals(
-            2008,
-            callYearFunction("YEAR(TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'))"));
-    }
-
-    @Test
-    public void testYearFunctionTimestamp() throws SQLException {
-
-        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-01-27T16:17:57+00:00'))"));
-
-        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-01-27T16:17:57Z'))"));
-
-        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-W10-3'))"));
-
-        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-W05'))"));
-
-        assertEquals(2015, callYearFunction("YEAR(TO_TIMESTAMP('2015-063'))"));
-
-        assertEquals(2006, callYearFunction("YEAR(TO_TIMESTAMP('2006-12-13'))"));
-
-        assertEquals(2004,
-            callYearFunction("YEAR(TO_TIMESTAMP('2004-12-13 10:13:18', 'yyyy-MM-dd hh:mm:ss'))"));
-
-        assertEquals(
-            2008,
-            callYearFunction("YEAR(TO_TIMESTAMP('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'))"));
-    }
-
-    @Test
-    public void testYearFuncAgainstColumns() throws Exception {
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
-                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
-                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2005-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
-                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2006-03-01 00:00:00'), TO_TIMESTAMP('2006-02-01 00:00:00'), TO_TIME('2008-02-01 00:00:00'), " +
-                "TO_DATE('2010-03-01 00:00:00:896', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-02-01'), TO_TIME('2015-02-01 00:00:00'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, YEAR(timestamps), YEAR(times), Year(unsignedDates), YEAR(unsignedTimestamps), " +
-                "YEAR(unsignedTimes) FROM T1 where YEAR(dates) = 2004");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(2006, rs.getInt(2));
-        assertEquals(2008, rs.getInt(3));
-        assertEquals(2010, rs.getInt(4));
-        assertEquals(2012, rs.getInt(5));
-        assertEquals(2015, rs.getInt(6));
-        assertFalse(rs.next());
-    }
-
-    @Test
-    public void testMonthFuncAgainstColumns() throws Exception {
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
-                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
-                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-04-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
-                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-05-10 00:00:00'), TO_TIMESTAMP('2006-04-12 00:00:00'), TO_TIME('2008-05-16 00:00:00'), " +
-                "TO_DATE('2010-06-20 00:00:00:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:00'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MONTH(timestamps), MONTH(times), MONTH(unsignedDates), MONTH(unsignedTimestamps), " +
-                "MONTH(unsignedTimes) FROM T1 where MONTH(dates) = 3");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(4, rs.getInt(2));
-        assertEquals(5, rs.getInt(3));
-        assertEquals(6, rs.getInt(4));
-        assertEquals(7, rs.getInt(5));
-        assertEquals(12, rs.getInt(6));
-        assertFalse(rs.next());
-    }
-
-    @Test
-    public void testSecondFuncAgainstColumns() throws Exception {
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
-                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'), " +
-                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:20:30'), TO_TIME('2008-05-16 10:00:30'), " +
-                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:50:30'), TO_TIME('2008-05-16 10:00:30'), " +
-                "TO_DATE('2010-06-20 00:00:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:00:50'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, SECOND(dates), SECOND(times), SECOND(unsignedDates), SECOND(unsignedTimestamps), " +
-                "SECOND(unsignedTimes) FROM T1 where SECOND(timestamps)=20");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(10, rs.getInt(2));
-        assertEquals(30, rs.getInt(3));
-        assertEquals(40, rs.getInt(4));
-        assertEquals(0, rs.getInt(5));
-        assertEquals(50, rs.getInt(6));
-        assertFalse(rs.next());
-    }
-
-    @Test
-    public void testWeekFuncAgainstColumns() throws Exception {
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-04-12 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-05-18 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('2004-01-10 10:00:10'), TO_TIMESTAMP('2006-05-18 08:00:20'), TO_TIME('2008-05-16 10:00:30'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, WEEK(dates), WEEK(times) FROM T1 where WEEK(timestamps)=15");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(2, rs.getInt(2));
-        assertEquals(20, rs.getInt(3));
-        assertFalse(rs.next());
-    }
-
-    @Test
-    public void testHourFuncAgainstColumns() throws Exception {
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 15:10:20'), " +
-                "TO_TIME('2008-05-16 20:40:30'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 10:10:20'), " +
-                "TO_TIME('2008-05-16 20:40:30'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (3, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 08:10:20'), " +
-                "TO_TIME('2008-05-16 20:40:30'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, HOUR(dates), HOUR(times) FROM T1 where HOUR(timestamps)=15");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(3, rs.getInt(2));
-        assertEquals(20, rs.getInt(3));
-        assertFalse(rs.next());
-    }
-
-    @Test
-    public void testNowFunction() throws Exception {
-        Date date = new Date(System.currentTimeMillis());
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, timestamps TIMESTAMP CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (?, ?)";
-        PreparedStatement stmt = conn.prepareStatement(dml);
-        stmt.setInt(1, 1);
-        stmt.setDate(2, new Date(date.getTime()-500));
-        stmt.execute();
-        stmt.setInt(1, 2);
-        stmt.setDate(2, new Date(date.getTime()+600000));
-        stmt.execute();
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT * from T1 where now() > timestamps");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(new Date(date.getTime()-500), rs.getDate(2));
-        assertFalse(rs.next());
-    }
-
-    @Test
-    public void testMinuteFuncAgainstColumns() throws Exception {
-        String ddl =
-                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
-                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
-        conn.createStatement().execute(ddl);
-        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:10:10'), TO_TIMESTAMP('2006-04-12 00:20:20'), TO_TIME('2008-05-16 10:30:30'), " +
-                "TO_DATE('2010-06-20 00:40:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:50:50'))";
-        conn.createStatement().execute(dml);
-        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:10:10'), TO_TIMESTAMP('2006-04-12 00:50:20'), TO_TIME('2008-05-16 10:30:30'), " +
-                "TO_DATE('2010-06-20 00:40:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:50:50'))";
-        conn.createStatement().execute(dml);
-        conn.commit();
-
-        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MINUTE(dates), MINUTE(times), MINUTE(unsignedDates), MINUTE(unsignedTimestamps), " +
-                "MINUTE(unsignedTimes) FROM T1 where MINUTE(timestamps)=20");
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        assertEquals(10, rs.getInt(2));
-        assertEquals(30, rs.getInt(3));
-        assertEquals(40, rs.getInt(4));
-        assertEquals(0, rs.getInt(5));
-        assertEquals(50, rs.getInt(6));
-        assertFalse(rs.next());
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d78e48b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index d42c5f2..d562d6a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -31,6 +31,7 @@ import org.apache.phoenix.expression.function.CeilTimestampExpression;
 import org.apache.phoenix.expression.function.CoalesceFunction;
 import org.apache.phoenix.expression.function.ConvertTimezoneFunction;
 import org.apache.phoenix.expression.function.CountAggregateFunction;
+import org.apache.phoenix.expression.function.DayOfMonthFunction;
 import org.apache.phoenix.expression.function.DecodeFunction;
 import org.apache.phoenix.expression.function.DistinctCountAggregateFunction;
 import org.apache.phoenix.expression.function.DistinctValueAggregateFunction;
@@ -209,7 +210,8 @@ public enum ExpressionType {
     HourFunction(HourFunction.class),
     NowFunction(NowFunction.class),
     InstrFunction(InstrFunction.class),
-    MinuteFunction(MinuteFunction.class)
+    MinuteFunction(MinuteFunction.class),
+    DayOfMonthFunction(DayOfMonthFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d78e48b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java
new file mode 100644
index 0000000..0c328cf
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.joda.time.DateTime;
+
+/**
+ * 
+ * Implementation of the DAYOFMONTH() built-in function. Takes a Date or Timestamp
+ * as input and returns an integer from 1 to 31 representing the day of the month.
+ * 
+ */
+@BuiltInFunction(name=DayOfMonthFunction.NAME,
+        args={@Argument(allowedTypes={PTimestamp.class})})
+public class DayOfMonthFunction extends ScalarFunction {
+    public static final String NAME = "DAYOFMONTH";
+
+    public DayOfMonthFunction() {
+    }
+
+    public DayOfMonthFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression expression = getChildExpression();
+        if (!expression.evaluate(tuple, ptr)) {
+            return false;
+        }
+        if (ptr.getLength() == 0) {
+            return true; // an empty pointer means the input was null
+        }
+        long dateTime = expression.getDataType().getCodec().decodeLong(ptr, expression.getSortOrder());
+        DateTime dt = new DateTime(dateTime);
+        int day = dt.getDayOfMonth();
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeInt(day, byteValue, 0);
+        ptr.set(byteValue);
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private Expression getChildExpression() {
+        return children.get(0);
+    }
+}
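
For reference, a minimal end-to-end sketch of the new built-in over JDBC, in the
style of the function ITs elsewhere in this series (the connection URL and table
are illustrative assumptions, not part of the commit):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class DayOfMonthExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS events (id INTEGER NOT NULL PRIMARY KEY, d DATE)");
            conn.createStatement().execute(
                "UPSERT INTO events VALUES (1, TO_DATE('2015-03-25 18:08:35'))");
            conn.commit();
            // DAYOFMONTH accepts a Date/Timestamp and returns 1..31.
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT DAYOFMONTH(d) FROM events");
            while (rs.next()) {
                System.out.println(rs.getInt(1)); // prints 25
            }
            conn.close();
        }
    }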


[02/50] [abbrv] phoenix git commit: PHOENIX-1776 The literal -1.0 (floating point) should not be converted to -1 (Integer) (Dave Hacker)

Posted by ma...@apache.org.
PHOENIX-1776 The literal -1.0 (floating point) should not be converted to -1 (Integer) (Dave Hacker)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f941e89f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f941e89f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f941e89f

Branch: refs/heads/calcite
Commit: f941e89f4f3a3778282ffc8570c64a181e01c043
Parents: ad2ad0c
Author: Samarth <sa...@salesforce.com>
Authored: Thu Mar 26 00:37:12 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Mar 26 00:37:12 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ArithmeticQueryIT.java      | 28 ++++++++++++++++++++
 .../apache/phoenix/parse/ParseNodeFactory.java  |  4 ++-
 2 files changed, 31 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f941e89f/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
index 2df1827..72eb016 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
@@ -957,4 +957,32 @@ public class ArithmeticQueryIT extends BaseHBaseManagedTimeIT {
         assertTrue(rs.next());
         assertEquals(1.333333333, rs.getDouble(1), 0.001);
     }
+
+    @Test
+    public void testFloatingPointUpsert() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE test (id VARCHAR not null primary key, name VARCHAR, lat FLOAT)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO test(id,name,lat) VALUES ('testid', 'testname', -1.00)";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT lat FROM test");
+        assertTrue(rs.next());
+        assertEquals(-1.0f, rs.getFloat(1), 0.001);
+    }
+
+    @Test
+    public void testFloatingPointMultiplicationUpsert() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String ddl = "CREATE TABLE test (id VARCHAR not null primary key, name VARCHAR, lat FLOAT)";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO test(id,name,lat) VALUES ('testid', 'testname', -1.00 * 1)";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT lat FROM test");
+        assertTrue(rs.next());
+        assertEquals(-1.0f, rs.getFloat(1), 0.001);
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f941e89f/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 931f327..eb1768c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.schema.stats.StatisticsCollectionScope;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -577,7 +578,8 @@ public class ParseNodeFactory {
 
     public ParseNode negate(ParseNode child) {
         // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each re-parsing
-        if (LiteralParseNode.ONE.equals(child)) {
+        if (LiteralParseNode.ONE.equals(child) && ((LiteralParseNode)child).getType().isCoercibleTo(
+                PLong.INSTANCE)) {
             return LiteralParseNode.MINUS_ONE;
         }
         return new MultiplyParseNode(Arrays.asList(child,LiteralParseNode.MINUS_ONE));
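
Concretely, the coercibility check above means a negated floating-point literal
now survives re-parsing with its type intact. A minimal JDBC sketch of the
observable behavior, mirroring the new ITs (the URL and table names are
illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class NegativeFloatLiteralExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS pts (id VARCHAR NOT NULL PRIMARY KEY, lat FLOAT)");
            // -1.00 used to be collapsed to the integer literal -1 while parsing;
            // with the PLong coercibility guard it stays a floating-point literal.
            conn.createStatement().execute("UPSERT INTO pts VALUES ('p1', -1.00)");
            conn.commit();
            ResultSet rs = conn.createStatement().executeQuery("SELECT lat FROM pts");
            rs.next();
            System.out.println(rs.getFloat(1)); // -1.0
            conn.close();
        }
    }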


[37/50] [abbrv] phoenix git commit: PHOENIX-1846 Add MINUTE built-in function (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1846 Add MINUTE built-in function (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8975fc1a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8975fc1a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8975fc1a

Branch: refs/heads/calcite
Commit: 8975fc1a427700fc3f13d34ddd610404781683ae
Parents: 36a7f24
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Apr 13 16:32:31 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Apr 13 16:32:31 2015 -0700

----------------------------------------------------------------------
 .../end2end/YearMonthSecondFunctionIT.java      | 26 +++++++
 .../phoenix/expression/ExpressionType.java      |  4 +-
 .../expression/function/MinuteFunction.java     | 81 ++++++++++++++++++++
 3 files changed, 110 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8975fc1a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
index cc51bdd..1206ee4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
@@ -258,4 +258,30 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         assertEquals(new Date(date.getTime()-500), rs.getDate(2));
         assertFalse(rs.next());
     }
+
+    @Test
+    public void testMinuteFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME, " +
+                        "unsignedDates UNSIGNED_DATE, unsignedTimestamps UNSIGNED_TIMESTAMP, unsignedTimes UNSIGNED_TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:10:10'), TO_TIMESTAMP('2006-04-12 00:20:20'), TO_TIME('2008-05-16 10:30:30'), " +
+                "TO_DATE('2010-06-20 00:40:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:50:50'))";
+        conn.createStatement().execute(dml);
+        dml = "UPSERT INTO T1 VALUES (2, TO_DATE('2004-03-01 00:10:10'), TO_TIMESTAMP('2006-04-12 00:50:20'), TO_TIME('2008-05-16 10:30:30'), " +
+                "TO_DATE('2010-06-20 00:40:40:789', 'yyyy-MM-dd HH:mm:ss:SSS'), TO_TIMESTAMP('2012-07-28'), TO_TIME('2015-12-25 00:50:50'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, MINUTE(dates), MINUTE(times), MINUTE(unsignedDates), MINUTE(unsignedTimestamps), " +
+                "MINUTE(unsignedTimes) FROM T1 where MINUTE(timestamps)=20");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(10, rs.getInt(2));
+        assertEquals(30, rs.getInt(3));
+        assertEquals(40, rs.getInt(4));
+        assertEquals(0, rs.getInt(5));
+        assertEquals(50, rs.getInt(6));
+        assertFalse(rs.next());
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8975fc1a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index c25b1cc..d42c5f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -52,6 +52,7 @@ import org.apache.phoenix.expression.function.LpadFunction;
 import org.apache.phoenix.expression.function.MD5Function;
 import org.apache.phoenix.expression.function.MaxAggregateFunction;
 import org.apache.phoenix.expression.function.MinAggregateFunction;
+import org.apache.phoenix.expression.function.MinuteFunction;
 import org.apache.phoenix.expression.function.MonthFunction;
 import org.apache.phoenix.expression.function.NowFunction;
 import org.apache.phoenix.expression.function.NthValueFunction;
@@ -207,7 +208,8 @@ public enum ExpressionType {
     WeekFunction(WeekFunction.class),
     HourFunction(HourFunction.class),
     NowFunction(NowFunction.class),
-    InstrFunction(InstrFunction.class)
+    InstrFunction(InstrFunction.class),
+    MinuteFunction(MinuteFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8975fc1a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java
new file mode 100644
index 0000000..fc721fc
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PTimestamp;
+
+/**
+ * 
+ * Implementation of the Minute() built-in. Input: Date/Timestamp/Time.
+ * Returns an integer from 0 to 59 representing the minute component of the given time.
+ * 
+ */
+@BuiltInFunction(name=MinuteFunction.NAME, 
+args={@Argument(allowedTypes={PTimestamp.class})})
+public class MinuteFunction extends ScalarFunction {
+    public static final String NAME = "MINUTE";
+
+    public MinuteFunction() {
+    }
+
+    public MinuteFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression expression = getChildExpression();
+        if (!expression.evaluate(tuple, ptr)) {
+            return false;
+        }
+        if (ptr.getLength() == 0) {
+            return true; // means null
+        }
+        long dateTime = expression.getDataType().getCodec().decodeLong(ptr, expression.getSortOrder());
+        int minute = (int)(((dateTime/1000) % 3600)/60);
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeInt(minute, byteValue, 0);
+        ptr.set(byteValue);
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private Expression getChildExpression() {
+        return children.get(0);
+    }
+}
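
A side note on the arithmetic in evaluate() above: the minute is computed
directly from the epoch value instead of going through a calendar object. A
standalone sketch of the same computation (the sample instant is illustrative):

    public class MinuteArithmeticSketch {
        public static void main(String[] args) {
            long epochMillis = 1428967951000L;          // some instant
            long seconds = epochMillis / 1000;          // whole seconds since the epoch
            long secondsIntoHour = seconds % 3600;      // seconds elapsed within the current hour
            int minute = (int) (secondsIntoHour / 60);  // 0..59
            System.out.println(minute);                 // prints 32 for this instant
        }
    }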


[27/50] [abbrv] phoenix git commit: PHOENIX-1807 Support UNION queries in subquery

Posted by ma...@apache.org.
PHOENIX-1807 Support UNION queries in subquery


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9ddb484a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9ddb484a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9ddb484a

Branch: refs/heads/calcite
Commit: 9ddb484aaf0b84b729be9b699b0528813b4ffb1b
Parents: 7aea692
Author: maryannxue <we...@intel.com>
Authored: Mon Apr 6 23:16:20 2015 -0400
Committer: maryannxue <we...@intel.com>
Committed: Mon Apr 6 23:16:20 2015 -0400

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/UnionAllIT.java  | 91 +++++++++++++++++---
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |  2 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |  2 +-
 .../apache/phoenix/compile/FromCompiler.java    |  2 +-
 .../apache/phoenix/compile/JoinCompiler.java    |  4 +-
 .../apache/phoenix/compile/QueryCompiler.java   |  4 +-
 .../phoenix/compile/SubqueryRewriter.java       | 19 ++--
 .../apache/phoenix/compile/UnionCompiler.java   |  6 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   | 10 +--
 .../apache/phoenix/optimize/QueryOptimizer.java |  2 +-
 .../apache/phoenix/parse/ParseNodeFactory.java  | 72 ++++++++++------
 .../apache/phoenix/parse/SelectStatement.java   | 33 ++++---
 12 files changed, 173 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
index b3b2f7d..1d4055a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UnionAllIT.java
@@ -28,7 +28,6 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
 import java.sql.Statement;
 import java.util.Collections;
 import java.util.Map;
@@ -424,7 +423,7 @@ public class UnionAllIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
 
     @Test
-    public void testUnionAllInSubquery() throws Exception {
+    public void testUnionAllInDerivedTable() throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(false);
@@ -435,21 +434,63 @@ public class UnionAllIT extends BaseOwnClusterHBaseManagedTimeIT {
                     "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
             createTestTable(getUrl(), ddl);
 
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            conn.commit();
+
             ddl = "CREATE TABLE b_table " +
-                    "  (a_string varchar not null, col1 integer" +
+                    "  (a_string varchar not null, col2 integer" +
                     "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
             createTestTable(getUrl(), ddl);
 
-            ddl = "select a_string, col1 from test_table where a_string in (select a_string from test_table union all select a_string from b_table)";
-            conn.createStatement().executeQuery(ddl);
-        }  catch (SQLFeatureNotSupportedException e) {
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 60);
+            stmt.execute();
+            conn.commit();
+
+            String query = "select a_string from " +
+                    "(select a_string, col1 from test_table union all select a_string, col2 from b_table order by a_string)";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("a", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("a", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("b", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("c", rs.getString(1));
+            assertFalse(rs.next());
+            
+            query = "select c from " +
+                    "(select a_string, col1 c from test_table union all select a_string, col2 c from b_table order by c)";
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals(10, rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(20, rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(30, rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(60, rs.getInt(1));
+            assertFalse(rs.next());
         } finally {
             conn.close();
         }
     }
 
     @Test
-    public void testUnionAllInSubqueryDerived() throws Exception {
+    public void testUnionAllInSubquery() throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(false);
@@ -460,15 +501,43 @@ public class UnionAllIT extends BaseOwnClusterHBaseManagedTimeIT {
                     "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
             createTestTable(getUrl(), ddl);
 
+            String dml = "UPSERT INTO test_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            conn.commit();
+
             ddl = "CREATE TABLE b_table " +
                     "  (a_string varchar not null, col1 integer" +
                     "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
             createTestTable(getUrl(), ddl);
 
-            ddl = "select a_string, col1 from test_table where a_string in (select a_string from  " +
-                    "(select * from test_table union all select * from b_table))";
-            conn.createStatement().executeQuery(ddl);
-        }  catch (SQLException e) { 
+            dml = "UPSERT INTO b_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 60);
+            stmt.execute();
+            conn.commit();
+
+            String[] queries = new String[2];
+            queries[0] = "select a_string, col1 from test_table where a_string in " +
+                    "(select a_string aa from b_table where a_string != 'a' union all select a_string bb from b_table)";
+            queries[1] = "select a_string, col1 from test_table where a_string in (select a_string from  " +
+                    "(select a_string from b_table where a_string != 'a' union all select a_string from b_table))";
+            for (String query : queries) {
+                ResultSet rs = conn.createStatement().executeQuery(query);
+                assertTrue(rs.next());
+                assertEquals("a", rs.getString(1));
+                assertEquals(10, rs.getInt(2));
+                assertFalse(rs.next());
+            }
         } finally {
             conn.close();
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 295bd79..9f60424 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -586,7 +586,7 @@ single_select returns [SelectStatement ret]
         (WHERE where=expression)?
         (GROUP BY group=group_by)?
         (HAVING having=expression)?
-        { ParseContext context = contextStack.peek(); $ret = factory.select(from, h, d!=null, sel, where, group, having, null, null, getBindCount(), context.isAggregate(), context.hasSequences()); }
+        { ParseContext context = contextStack.peek(); $ret = factory.select(from, h, d!=null, sel, where, group, having, null, null, getBindCount(), context.isAggregate(), context.hasSequences(), null); }
     ;
 finally{ contextStack.pop(); }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 322d24a..b8e68f9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -322,7 +322,7 @@ public class DeleteCompiler {
                         hint, false, aliasedNodes, delete.getWhere(), 
                         Collections.<ParseNode>emptyList(), null, 
                         delete.getOrderBy(), delete.getLimit(),
-                        delete.getBindCount(), false, false);
+                        delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList());
                 select = StatementNormalizer.normalize(select, resolver);
                 SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolver, connection);
                 if (transformedSelect != select) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 98a1108..da78b24 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -469,7 +469,7 @@ public class FromCompiler {
                     if (node instanceof WildcardParseNode
                             || node instanceof TableWildcardParseNode
                             || node instanceof FamilyWildcardParseNode)
-                        throw new SQLException("Encountered wildcard in subqueries.");
+                        throw new SQLFeatureNotSupportedException("Wildcard in subqueries not supported.");
 
                     alias = SchemaUtil.normalizeIdentifier(node.getAlias());
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 98b7edb..af6c712 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -688,7 +688,7 @@ public class JoinCompiler {
             if (isSubselect())
                 return SubselectRewriter.applyOrderBy(SubselectRewriter.applyPostFilters(subselect, preFilters, tableNode.getAlias()), orderBy, tableNode.getAlias());
 
-            return NODE_FACTORY.select(tableNode, select.getHint(), false, selectNodes, getPreFiltersCombined(), null, null, orderBy, null, 0, false, select.hasSequence());
+            return NODE_FACTORY.select(tableNode, select.getHint(), false, selectNodes, getPreFiltersCombined(), null, null, orderBy, null, 0, false, select.hasSequence(), Collections.<SelectStatement>emptyList());
         }
 
         public boolean hasFilters() {
@@ -1267,7 +1267,7 @@ public class JoinCompiler {
         String tableAlias = tableRef.getTableAlias();
         TableNode from = NODE_FACTORY.namedTable(tableAlias == null ? null : '"' + tableAlias + '"', tName, dynamicCols);
 
-        return NODE_FACTORY.select(from, hintNode, false, selectList, where, groupBy, null, orderBy, null, 0, groupBy != null, hasSequence);
+        return NODE_FACTORY.select(from, hintNode, false, selectList, where, groupBy, null, orderBy, null, 0, groupBy != null, hasSequence, Collections.<SelectStatement>emptyList());
     }
 
     public static PTable joinProjectedTables(PTable left, PTable right, JoinType type) throws SQLException {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index f8177e6..16a7a33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -174,7 +174,7 @@ public class QueryCompiler {
         }
         UnionCompiler.checkProjectionNumAndTypes(plans);
 
-        TableRef tableRef = UnionCompiler.contructSchemaTable(statement, plans.get(0));
+        TableRef tableRef = UnionCompiler.contructSchemaTable(statement, plans.get(0), select.hasWildcard() ? null : select.getSelect());
         ColumnResolver resolver = FromCompiler.getResolver(tableRef);
         StatementContext context = new StatementContext(statement, resolver, scan, sequenceManager);
 
@@ -422,7 +422,7 @@ public class QueryCompiler {
         context.setResolver(resolver);
         TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString()));
         ParseNode where = joinTable.getPostFiltersCombined();
-        SelectStatement select = asSubquery ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false, Collections.<AliasedNode> emptyList(), where, null, null, orderBy, null, 0, false, joinTable.getStatement().hasSequence())
+        SelectStatement select = asSubquery ? NODE_FACTORY.select(from, joinTable.getStatement().getHint(), false, Collections.<AliasedNode> emptyList(), where, null, null, orderBy, null, 0, false, joinTable.getStatement().hasSequence(), Collections.<SelectStatement>emptyList())
                 : NODE_FACTORY.select(joinTable.getStatement(), from, where);
         
         return compileSingleFlatQuery(context, select, binds, asSubquery, false, innerPlan, null, isInRowKeyOrder);
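
(Note on the contructSchemaTable() call above: when the union's outer select
list contains a wildcard, the select-node aliases cannot be assumed to line up
one-to-one with the projected columns, so null is passed and UnionCompiler
falls back to the column projector's own names, as shown in the
UnionCompiler.java hunk below.)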

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
index 60067e5..8e887a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
@@ -36,6 +36,7 @@ import org.apache.phoenix.parse.ColumnParseNode;
 import org.apache.phoenix.parse.ComparisonParseNode;
 import org.apache.phoenix.parse.CompoundParseNode;
 import org.apache.phoenix.parse.ExistsParseNode;
+import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.parse.InParseNode;
 import org.apache.phoenix.parse.JoinTableNode.JoinType;
 import org.apache.phoenix.parse.LiteralParseNode;
@@ -139,7 +140,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
         }
         
         SubqueryParseNode subqueryNode = (SubqueryParseNode) l.get(1);
-        SelectStatement subquery = subqueryNode.getSelectNode();
+        SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode());
         String rhsTableAlias = ParseNodeFactory.createTempAlias();
         List<AliasedNode> selectNodes = fixAliasedNodes(subquery.getSelect(), true);
         subquery = NODE_FACTORY.select(subquery, !node.isSubqueryDistinct(), selectNodes);
@@ -160,7 +161,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
         }
         
         SubqueryParseNode subqueryNode = (SubqueryParseNode) l.get(0);
-        SelectStatement subquery = subqueryNode.getSelectNode();
+        SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode());
         String rhsTableAlias = ParseNodeFactory.createTempAlias();
         JoinConditionExtractor conditionExtractor = new JoinConditionExtractor(subquery, resolver, connection, rhsTableAlias);
         ParseNode where = subquery.getWhere() == null ? null : subquery.getWhere().accept(conditionExtractor);
@@ -199,7 +200,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
         }
         
         SubqueryParseNode subqueryNode = (SubqueryParseNode) secondChild;
-        SelectStatement subquery = subqueryNode.getSelectNode();
+        SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode());
         String rhsTableAlias = ParseNodeFactory.createTempAlias();
         JoinConditionExtractor conditionExtractor = new JoinConditionExtractor(subquery, resolver, connection, rhsTableAlias);
         ParseNode where = subquery.getWhere() == null ? null : subquery.getWhere().accept(conditionExtractor);
@@ -282,7 +283,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
         }
         
         SubqueryParseNode subqueryNode = (SubqueryParseNode) firstChild;
-        SelectStatement subquery = subqueryNode.getSelectNode();
+        SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode());
         String rhsTableAlias = ParseNodeFactory.createTempAlias();
         JoinConditionExtractor conditionExtractor = new JoinConditionExtractor(subquery, resolver, connection, rhsTableAlias);
         ParseNode where = subquery.getWhere() == null ? null : subquery.getWhere().accept(conditionExtractor);
@@ -339,7 +340,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
                 groupbyNodes.set(i - 1, aliasedNode.getNode());
             }
             SelectStatement derivedTableStmt = NODE_FACTORY.select(subquery, subquery.isDistinct(), derivedTableSelect, where, derivedTableGroupBy, true);
-            subquery = NODE_FACTORY.select(NODE_FACTORY.derivedTable(derivedTableAlias, derivedTableStmt), subquery.getHint(), false, selectNodes, null, groupbyNodes, null, Collections.<OrderByNode> emptyList(), null, subquery.getBindCount(), true, false);
+            subquery = NODE_FACTORY.select(NODE_FACTORY.derivedTable(derivedTableAlias, derivedTableStmt), subquery.getHint(), false, selectNodes, null, groupbyNodes, null, Collections.<OrderByNode> emptyList(), null, subquery.getBindCount(), true, false, Collections.<SelectStatement>emptyList());
         }
         
         ParseNode onNode = conditionExtractor.getJoinCondition();
@@ -357,6 +358,14 @@ public class SubqueryRewriter extends ParseNodeRewriter {
         return Lists.newArrayList(firstChild, secondChild);
     }
     
+    private SelectStatement fixSubqueryStatement(SelectStatement select) {
+        if (!select.isUnion())
+            return select;
+        
+        // Wrap as a derived table.
+        return NODE_FACTORY.select(NODE_FACTORY.derivedTable(ParseNodeFactory.createTempAlias(), select), HintNode.EMPTY_HINT_NODE, false, select.getSelect(), null, null, null, null, null, select.getBindCount(), false, false, Collections.<SelectStatement> emptyList());
+    }
+    
     private List<AliasedNode> fixAliasedNodes(List<AliasedNode> nodes, boolean addSelectOne) {
         List<AliasedNode> normNodes = Lists.<AliasedNode> newArrayListWithExpectedSize(nodes.size() + (addSelectOne ? 1 : 0));
         if (addSelectOne) {
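
The effect of fixSubqueryStatement() is easiest to see at the SQL level: a
UNION ALL appearing directly as a subquery is wrapped in a derived table under
a generated temp alias before the IN/EXISTS/comparison rewrites run, so the
existing derived-table machinery handles it. Roughly (the $1 alias is
illustrative):

    // Before the rewrite: the union sits directly inside the IN subquery.
    String original =
        "SELECT a_string, col1 FROM test_table WHERE a_string IN " +
        "(SELECT a_string FROM b_table UNION ALL SELECT a_string FROM b_table)";
    // After fixSubqueryStatement(): the union is wrapped as a derived table.
    String rewritten =
        "SELECT a_string, col1 FROM test_table WHERE a_string IN " +
        "(SELECT a_string FROM (SELECT a_string FROM b_table " +
        "UNION ALL SELECT a_string FROM b_table) $1)";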

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
index 3f069ff..269232e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
@@ -26,6 +26,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.AliasedNode;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnImpl;
 import org.apache.phoenix.schema.PName;
@@ -66,12 +67,13 @@ public class UnionCompiler {
         return selectPlans;
     }
 
-    public static TableRef contructSchemaTable(PhoenixStatement statement, QueryPlan plan) throws SQLException {
+    public static TableRef contructSchemaTable(PhoenixStatement statement, QueryPlan plan, List<AliasedNode> selectNodes) throws SQLException {
         List<PColumn> projectedColumns = new ArrayList<PColumn>();
         for (int i=0; i< plan.getProjector().getColumnCount(); i++) {
             ColumnProjector colProj = plan.getProjector().getColumnProjector(i);
             Expression sourceExpression = colProj.getExpression();
-            PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(colProj.getName()), UNION_FAMILY_NAME,
+            String name = selectNodes == null ? colProj.getName() : selectNodes.get(i).getAlias();
+            PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(name), UNION_FAMILY_NAME,
                     sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
                     i, sourceExpression.getSortOrder(), 500, null, false, sourceExpression.toString());
             projectedColumns.add(projectedColumn);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 462e1f0..dfb9779 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -898,19 +898,11 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
 
     protected static class ExecutableNodeFactory extends ParseNodeFactory {
         @Override
-        public ExecutableSelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select,
-                ParseNode where, List<ParseNode> groupBy, ParseNode having,
-                List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
-            return this.select(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence, 
-                Collections.<SelectStatement>emptyList());
-        }
-
-        @Override
         public ExecutableSelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
                 List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate,
                 boolean hasSequence, List<SelectStatement> selects) {
             return new ExecutableSelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.<ParseNode>emptyList() : groupBy,
-                    having, orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence, selects);
+                    having, orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence, selects == null ? Collections.<SelectStatement>emptyList() : selects);
         }
 
         @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 382bba5..7b3a63a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -290,7 +290,7 @@ public class QueryOptimizer {
                             aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
                             nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
                         }
-                        SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, indexSelect.getLimit(), indexSelect.getBindCount(), false, indexSelect.hasSequence());
+                        SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, indexSelect.getLimit(), indexSelect.getBindCount(), false, indexSelect.hasSequence(), Collections.<SelectStatement>emptyList());
                         ParseNode outerWhere = FACTORY.in(nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes), FACTORY.subquery(innerSelect, false), false, true);
                         ParseNode extractedCondition = whereRewriter.getExtractedCondition();
                         if (extractedCondition != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 0f5074e..5eb641e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.parse;
 import java.lang.reflect.Constructor;
 import java.math.BigDecimal;
 import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -29,6 +28,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.exception.UnknownFunctionException;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
@@ -659,15 +660,8 @@ public class ParseNodeFactory {
             boolean hasSequence, List<SelectStatement> selects) {
 
         return new SelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.<ParseNode>emptyList() : groupBy, having,
-                orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence, selects);
+                orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence, selects == null ? Collections.<SelectStatement>emptyList() : selects);
     } 
-
-    public SelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select, ParseNode where,
-            List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate, boolean hasSequence) {
-
-        return new SelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.<ParseNode>emptyList() : groupBy, having,
-                orderBy == null ? Collections.<OrderByNode>emptyList() : orderBy, limit, bindCount, isAggregate, hasSequence);
-    }
     
     public UpsertStatement upsert(NamedTableNode table, HintNode hint, List<ColumnName> columns, List<ParseNode> values, SelectStatement select, int bindCount) {
         return new UpsertStatement(table, hint, columns, values, select, bindCount);
@@ -679,53 +673,53 @@ public class ParseNodeFactory {
 
     public SelectStatement select(SelectStatement statement, ParseNode where) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), statement.getHaving(),
-                statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, ParseNode where, ParseNode having) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), having,
-                statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
     
     public SelectStatement select(SelectStatement statement, List<AliasedNode> select, ParseNode where, List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), 
-                select, where, groupBy, having, orderBy, statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                select, where, groupBy, having, orderBy, statement.getLimit(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
     
     public SelectStatement select(SelectStatement statement, TableNode table) {
         return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), statement.getWhere(), statement.getGroupBy(),
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(),
-                statement.hasSequence());
+                statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, TableNode table, ParseNode where) {
         return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(),
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(),
-                statement.hasSequence());
+                statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, boolean isDistinct, List<AliasedNode> select) {
         return select(statement.getFrom(), statement.getHint(), isDistinct, select, statement.getWhere(), statement.getGroupBy(),
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(),
-                statement.hasSequence());
+                statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, boolean isDistinct, List<AliasedNode> select, ParseNode where) {
         return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, statement.getGroupBy(),
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(),
-                statement.hasSequence());
+                statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, boolean isDistinct, List<AliasedNode> select, ParseNode where, List<ParseNode> groupBy, boolean isAggregate) {
         return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, groupBy,
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), isAggregate,
-                statement.hasSequence());
+                statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, List<OrderByNode> orderBy) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
                 statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, statement.getLimit(),
-                statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+                statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, HintNode hint) {
@@ -737,39 +731,65 @@ public class ParseNodeFactory {
     public SelectStatement select(SelectStatement statement, HintNode hint, ParseNode where) {
         return select(statement.getFrom(), hint, statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(),
                 statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), statement.isAggregate(),
-                statement.hasSequence());
+                statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
             statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, limit,
-            bindCount, isAggregate || statement.isAggregate(), statement.hasSequence());
+            bindCount, isAggregate || statement.isAggregate(), statement.hasSequence(), statement.getSelects());
 
     }
 
     public SelectStatement select(SelectStatement statement, LimitNode limit) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
             statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), limit,
-            statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+            statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(SelectStatement statement, List<OrderByNode> orderBy, LimitNode limit) {
         return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(),
             statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, limit,
-            statement.getBindCount(), statement.isAggregate(), statement.hasSequence());
+            statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects());
     }
 
     public SelectStatement select(List<SelectStatement> statements, List<OrderByNode> orderBy, LimitNode limit, int bindCount, boolean isAggregate) {
         if (statements.size() == 1)
-            return select(statements.get(0), orderBy, limit, bindCount, isAggregate);
+            return select(statements.get(0), orderBy, limit, bindCount, isAggregate);        
+
+        // Get a list of adjusted aliases from a non-wildcard sub-select if any. 
+        // We do not check the number of select nodes among all sub-selects, as 
+        // it will be done later at compile stage. Empty or different aliases 
+        // are ignored, since they cannot be referred by outer queries.
+        List<String> aliases = Lists.<String> newArrayList();
+        for (int i = 0; i < statements.size() && aliases.isEmpty(); i++) {
+            SelectStatement subselect = statements.get(i);
+            if (!subselect.hasWildcard()) {
+                for (AliasedNode aliasedNode : subselect.getSelect()) {
+                    String alias = aliasedNode.getAlias();
+                    if (alias == null) {
+                        alias = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias());
+                    }
+                    aliases.add(alias == null ? createTempAlias() : alias);
+                }
+            }
+        }
+
+        List<AliasedNode> aliasedNodes;
+        if (aliases.isEmpty()) {
+            aliasedNodes = Lists.newArrayList(aliasedNode(null, wildcard()));
+        } else {
+            aliasedNodes = Lists.newArrayListWithExpectedSize(aliases.size());
+            for (String alias : aliases) {
+                aliasedNodes.add(aliasedNode(alias, column(null, alias, alias)));
+            }
+        }
         
-        return select(null, HintNode.EMPTY_HINT_NODE, false, Lists.newArrayList(aliasedNode(null, wildcard())), 
+        return select(null, HintNode.EMPTY_HINT_NODE, false, aliasedNodes, 
                 null, null, null, orderBy, limit, bindCount, false, false, statements);
     }
 
     public SubqueryParseNode subquery(SelectStatement select, boolean expectSingleRow) {
-        if (select.isUnion()) 
-            throw new RuntimeException(new SQLFeatureNotSupportedException());
         return new SubqueryParseNode(select, expectSingleRow);
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9ddb484a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
index 08cec87..44b24af 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
@@ -42,7 +42,7 @@ public class SelectStatement implements FilterableStatement {
                     Collections.<AliasedNode>singletonList(new AliasedNode(null, LiteralParseNode.ONE)),
                     null, Collections.<ParseNode>emptyList(),
                     null, Collections.<OrderByNode>emptyList(),
-                    null, 0, false, false);
+                    null, 0, false, false, Collections.<SelectStatement>emptyList());
     public static final SelectStatement COUNT_ONE =
             new SelectStatement(
                     null, null, false,
@@ -54,14 +54,14 @@ public class SelectStatement implements FilterableStatement {
                                 new BuiltInFunctionInfo(CountAggregateFunction.class, CountAggregateFunction.class.getAnnotation(BuiltInFunction.class))))),
                     null, Collections.<ParseNode>emptyList(), 
                     null, Collections.<OrderByNode>emptyList(), 
-                    null, 0, true, false);
+                    null, 0, true, false, Collections.<SelectStatement>emptyList());
     public static SelectStatement create(SelectStatement select, HintNode hint) {
         if (select.getHint() == hint || hint.isEmpty()) {
             return select;
         }
         return new SelectStatement(select.getFrom(), hint, select.isDistinct(), 
                 select.getSelect(), select.getWhere(), select.getGroupBy(), select.getHaving(), 
-                select.getOrderBy(), select.getLimit(), select.getBindCount(), select.isAggregate(), select.hasSequence());
+                select.getOrderBy(), select.getLimit(), select.getBindCount(), select.isAggregate(), select.hasSequence(), select.getSelects());
     }
     
     public SelectStatement combine(ParseNode where) {
@@ -73,13 +73,13 @@ public class SelectStatement implements FilterableStatement {
         }
         return new SelectStatement(this.getFrom(), this.getHint(), this.isDistinct(), 
                 this.getSelect(), where, this.getGroupBy(), this.getHaving(), 
-                this.getOrderBy(), this.getLimit(), this.getBindCount(), this.isAggregate(), this.hasSequence());
+                this.getOrderBy(), this.getLimit(), this.getBindCount(), this.isAggregate(), this.hasSequence(), this.selects);
     }
     
     public static SelectStatement create(SelectStatement select, List<AliasedNode> selects) {
         return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), 
                 selects, select.getWhere(), select.getGroupBy(), select.getHaving(), 
-                select.getOrderBy(), select.getLimit(), select.getBindCount(), select.isAggregate(), select.hasSequence());
+                select.getOrderBy(), select.getLimit(), select.getBindCount(), select.isAggregate(), select.hasSequence(), select.getSelects());
     }
     
     // Copy constructor for sub select statements in a union
@@ -87,7 +87,7 @@ public class SelectStatement implements FilterableStatement {
             List<OrderByNode> orderBy, LimitNode limit, boolean isAggregate) {
         return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), 
                 select.getSelect(), select.getWhere(), select.getGroupBy(), select.getHaving(), 
-                orderBy, limit, select.getBindCount(), isAggregate, select.hasSequence());
+                orderBy, limit, select.getBindCount(), isAggregate, select.hasSequence(), select.getSelects());
     }
 
     private final TableNode fromTable;
@@ -102,6 +102,7 @@ public class SelectStatement implements FilterableStatement {
     private final int bindCount;
     private final boolean isAggregate;
     private final boolean hasSequence;
+    private final boolean hasWildcard;
     private final List<SelectStatement> selects = new ArrayList<SelectStatement>();
     
     @Override
@@ -228,17 +229,19 @@ public class SelectStatement implements FilterableStatement {
         this.bindCount = bindCount;
         this.isAggregate = isAggregate || groupBy.size() != countConstants(groupBy) || this.having != null;
         this.hasSequence = hasSequence;
+        boolean hasWildcard = false;
+        for (AliasedNode aliasedNode : select) {
+            ParseNode node = aliasedNode.getNode();
+            if (node instanceof WildcardParseNode || node instanceof TableWildcardParseNode || node instanceof FamilyWildcardParseNode) {
+                hasWildcard = true;
+                break;
+            }
+        }
+        this.hasWildcard = hasWildcard;
         if (!selects.isEmpty()) {
             this.selects.addAll(selects);
         }
     }
-
-    public SelectStatement(TableNode from, HintNode hint, boolean isDistinct, List<AliasedNode> select,
-            ParseNode where, List<ParseNode> groupBy, ParseNode having, List<OrderByNode> orderBy, LimitNode limit,
-            int bindCount, boolean isAggregate, boolean hasSequence) {
-        this(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, bindCount, isAggregate, hasSequence,
-                Collections.<SelectStatement>emptyList());
-    }
     
     @Override
     public boolean isDistinct() {
@@ -326,4 +329,8 @@ public class SelectStatement implements FilterableStatement {
     public List<SelectStatement> getSelects() {
         return selects;
     }
+    
+    public boolean hasWildcard() {
+        return hasWildcard;
+    }
 }
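
For context, the constructor change above derives the new hasWildcard flag by scanning the select list for WildcardParseNode, TableWildcardParseNode, or FamilyWildcardParseNode (i.e. SELECT *, SELECT t.*, and SELECT cf1.*). A minimal sketch of how a caller might consult the flag; the parse calls here are illustrative usage, not part of this commit:

    // Hypothetical caller: parse a query and check the new flag.
    //   SELECT *     -> WildcardParseNode
    //   SELECT t.*   -> TableWildcardParseNode
    //   SELECT cf1.* -> FamilyWildcardParseNode
    SQLParser parser = new SQLParser("SELECT * FROM my_table");
    SelectStatement select = parser.parseQuery();
    if (select.hasWildcard()) {
        // e.g. a union-all compiler could reject wildcard projections here
    }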


[19/50] [abbrv] phoenix git commit: PHOENIX-1803 Fix flapping PhoenixServerRpcIT.testMetadataQos

Posted by ma...@apache.org.
PHOENIX-1803 Fix flapping PhoenixServerRpcIT.testMetadataQos


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3a0ce7d0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3a0ce7d0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3a0ce7d0

Branch: refs/heads/calcite
Commit: 3a0ce7d0f31bddc1ea802d66dde91bdde0e01504
Parents: eb73271
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Thu Apr 2 17:50:58 2015 -0700
Committer: Thomas D'Silva <tw...@gmail.com>
Committed: Thu Apr 2 17:56:37 2015 -0700

----------------------------------------------------------------------
 .../src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3a0ce7d0/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index dbcd7ac..4d3620f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -221,8 +221,8 @@ public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
                     "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
             // query the table from another connection, so that SYSTEM.STATS will be used 
             conn.createStatement().execute("SELECT * FROM "+DATA_TABLE_FULL_NAME);
-            // verify that that metadata queue is used once 
-            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor()).dispatch(Mockito.any(CallRunner.class));
+            // verify that the metadata queue is used at least once
+            Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getMetadataRpcExecutor(), Mockito.atLeastOnce()).dispatch(Mockito.any(CallRunner.class));
         }
         finally {
             conn.close();
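
The fix swaps Mockito's implicit times(1) verification for atLeastOnce(): the metadata executor can legitimately be dispatched more than once during the test, so an exact-count check flaps. A self-contained sketch of the difference, using hypothetical names rather than the Phoenix classes above:

    import static org.mockito.Mockito.*;
    import java.util.List;

    @SuppressWarnings("unchecked")
    List<String> dispatcher = mock(List.class);
    dispatcher.add("call");
    dispatcher.add("call");                        // a second, unanticipated invocation
    // verify(dispatcher).add("call");             // would fail: defaults to times(1)
    verify(dispatcher, atLeastOnce()).add("call"); // passes for one or more invocations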


[34/50] [abbrv] phoenix git commit: PHOENIX-1749 ORDER BY should support ordinal position as well as expression (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1749 ORDER BY should support ordinal position as well as expression (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/795debfe
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/795debfe
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/795debfe

Branch: refs/heads/calcite
Commit: 795debfe568206bc9a90c15c6acf628c4f9460d4
Parents: 6b1818c
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Apr 11 17:37:25 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Apr 11 17:37:25 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/OrderByIT.java   | 391 ++++++++++++++++++-
 .../apache/phoenix/compile/OrderByCompiler.java |  34 +-
 .../apache/phoenix/compile/QueryCompiler.java   |   2 +-
 .../phoenix/compile/QueryOptimizerTest.java     |  14 +-
 4 files changed, 431 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
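
In short, an ORDER BY item may now be the 1-based ordinal of a select-list column, as the new OrderByIT tests below exercise. A minimal JDBC sketch (the connection URL and table are hypothetical):

    // ORDER BY 2 sorts on the second projected column, equivalent to ORDER BY col1
    Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT a_string, col1 FROM t_table ORDER BY 2");
    while (rs.next()) {
        System.out.println(rs.getString(1) + " " + rs.getInt(2));
    }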


http://git-wip-us.apache.org/repos/asf/phoenix/blob/795debfe/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
index a8beb1e..74eb7fe 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
@@ -30,11 +30,14 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
+import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.Properties;
 
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -80,7 +83,7 @@ public class OrderByIT extends BaseClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
 
     @Test
     public void testDescMultiOrderByExpr() throws Exception {
@@ -118,4 +121,390 @@ public class OrderByIT extends BaseClientManagedTimeIT {
             conn.close();
         }
     }
+
+    @Test
+    public void testOrderByWithPosition() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE t_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            String dml = "UPSERT INTO t_table VALUES(?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 40);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            conn.commit();
+
+            String query = "select count(*), col1 from t_table group by col1 order by 2";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));
+            assertTrue(rs.next());
+            assertEquals(1,rs.getInt(1));  
+            assertFalse(rs.next());  
+
+            query = "select a_string x, col1 y from t_table order by x";
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(40,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));  
+            assertEquals(30,rs.getInt(2));
+            assertFalse(rs.next());  
+
+            query = "select * from t_table order by 2";
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));  
+            assertEquals(30,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));
+            assertEquals(40,rs.getInt(2));
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+
+    @Test
+    public void testColumnFamily() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE x_table " +
+                    "  (a_string varchar not null, cf1.a integer, cf1.b varchar, col1 integer, cf2.c varchar, cf2.d integer, col2 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            String dml = "UPSERT INTO x_table VALUES(?,?,?,?,?,?,?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 40);
+            stmt.setString(3, "aa");
+            stmt.setInt(4, 10);
+            stmt.setString(5, "bb");
+            stmt.setInt(6, 20);
+            stmt.setInt(7, 1);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.setString(3, "cc");
+            stmt.setInt(4, 50);
+            stmt.setString(5, "dd");
+            stmt.setInt(6, 60);
+            stmt.setInt(7, 3);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 40);
+            stmt.setString(3, "bb");
+            stmt.setInt(4, 5);
+            stmt.setString(5, "aa");
+            stmt.setInt(6, 80);
+            stmt.setInt(7, 2);
+            stmt.execute();
+            conn.commit();
+
+            String query = "select * from x_table order by 2, 5";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));
+            assertEquals(30,rs.getInt(2));
+            assertEquals("cc",rs.getString(3));
+            assertEquals(50,rs.getInt(4));
+            assertEquals("dd",rs.getString(5));
+            assertEquals(60,rs.getInt(6));
+            assertEquals(3,rs.getInt(7));
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertEquals("bb",rs.getString(3));
+            assertEquals(5,rs.getInt(4));
+            assertEquals("aa",rs.getString(5));
+            assertEquals(80,rs.getInt(6));
+            assertEquals(2,rs.getInt(7));   
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertEquals("aa",rs.getString(3));
+            assertEquals(10,rs.getInt(4));
+            assertEquals("bb",rs.getString(5));
+            assertEquals(20,rs.getInt(6));
+            assertEquals(1,rs.getInt(7));         
+            assertFalse(rs.next());  
+
+            query = "select * from x_table order by 7";
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertEquals("aa",rs.getString(3));
+            assertEquals(10,rs.getInt(4));
+            assertEquals("bb",rs.getString(5));
+            assertEquals(20,rs.getInt(6));
+            assertEquals(1,rs.getInt(7));  
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertEquals("bb",rs.getString(3));
+            assertEquals(5,rs.getInt(4));
+            assertEquals("aa",rs.getString(5));
+            assertEquals(80,rs.getInt(6));
+            assertEquals(2,rs.getInt(7));  
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));
+            assertEquals(30,rs.getInt(2));
+            assertEquals("cc",rs.getString(3));
+            assertEquals(50,rs.getInt(4));
+            assertEquals("dd",rs.getString(5));
+            assertEquals(60,rs.getInt(6));
+            assertEquals(3,rs.getInt(7));
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testOrderByWithJoin() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE s_table " +
+                    "  (a_string varchar not null, cf1.a integer, cf1.b varchar, col1 integer, cf2.c varchar, cf2.d integer " +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            String dml = "UPSERT INTO s_table VALUES(?,?,?,?,?,?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 40);
+            stmt.setString(3, "aa");
+            stmt.setInt(4, 10);
+            stmt.setString(5, "bb");
+            stmt.setInt(6, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.setString(3, "cc");
+            stmt.setInt(4, 50);
+            stmt.setString(5, "dd");
+            stmt.setInt(6, 60);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 40);
+            stmt.setString(3, "bb");
+            stmt.setInt(4, 5);
+            stmt.setString(5, "aa");
+            stmt.setInt(6, 80);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE t_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            dml = "UPSERT INTO t_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 40);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            conn.commit();
+
+            String query = "select t1.* from s_table t1 join t_table t2 on t1.a_string = t2.a_string order by 3";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertEquals("aa",rs.getString(3));
+            assertEquals(10,rs.getInt(4));
+            assertEquals("bb",rs.getString(5));
+            assertEquals(20,rs.getInt(6));
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertEquals("bb",rs.getString(3));
+            assertEquals(5,rs.getInt(4));
+            assertEquals("aa",rs.getString(5));
+            assertEquals(80,rs.getInt(6));         
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));
+            assertEquals(30,rs.getInt(2));
+            assertEquals("cc",rs.getString(3));
+            assertEquals(50,rs.getInt(4));
+            assertEquals("dd",rs.getString(5));
+            assertEquals(60,rs.getInt(6));
+            assertFalse(rs.next());  
+
+            query = "select t1.a_string, t2.col1 from s_table t1 join t_table t2 on t1.a_string = t2.a_string order by 2";
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));  
+            assertEquals(20,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));  
+            assertEquals(30,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertFalse(rs.next()); 
+        } catch (SQLException e) {
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testOrderByWithUnionAll() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE x_table " +
+                    "  (a_string varchar not null, cf1.a integer, cf1.b varchar, col1 integer, cf2.c varchar, cf2.d integer " +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+            String dml = "UPSERT INTO x_table VALUES(?,?,?,?,?,?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 40);
+            stmt.setString(3, "aa");
+            stmt.setInt(4, 10);
+            stmt.setString(5, "bb");
+            stmt.setInt(6, 20);
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 30);
+            stmt.setString(3, "cc");
+            stmt.setInt(4, 50);
+            stmt.setString(5, "dd");
+            stmt.setInt(6, 60);
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 40);
+            stmt.setString(3, "bb");
+            stmt.setInt(4, 5);
+            stmt.setString(5, "aa");
+            stmt.setInt(6, 80);
+            stmt.execute();
+            conn.commit();
+
+            ddl = "CREATE TABLE y_table " +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            dml = "UPSERT INTO y_table VALUES(?, ?)";
+            stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "aa");
+            stmt.setInt(2, 40);
+            stmt.execute();
+            stmt.setString(1, "bb");
+            stmt.setInt(2, 10);
+            stmt.execute();
+            stmt.setString(1, "cc");
+            stmt.setInt(2, 30);
+            stmt.execute();
+            conn.commit();
+
+            String query = "select a_string, cf2.d from x_table union all select * from y_table order by 2";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("bb",rs.getString(1));  
+            assertEquals(10,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("a",rs.getString(1));  
+            assertEquals(20,rs.getInt(2));      
+            assertTrue(rs.next());
+            assertEquals("cc",rs.getString(1));
+            assertEquals(30,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("aa",rs.getString(1));  
+            assertEquals(40,rs.getInt(2));
+            assertTrue(rs.next());
+            assertEquals("c",rs.getString(1));  
+            assertEquals(60,rs.getInt(2));      
+            assertTrue(rs.next());
+            assertEquals("b",rs.getString(1));
+            assertEquals(80,rs.getInt(2));
+            assertFalse(rs.next());  
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
+    public void testOrderByWithExpression() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String ddl = "CREATE TABLE e_table " +
+                    "  (a_string varchar not null, col1 integer, col2 integer, col3 timestamp, col4 varchar" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+            createTestTable(getUrl(), ddl);
+
+            Date date = new Date(System.currentTimeMillis());
+            String dml = "UPSERT INTO e_table VALUES(?, ?, ?, ?, ?)";
+            PreparedStatement stmt = conn.prepareStatement(dml);
+            stmt.setString(1, "a");
+            stmt.setInt(2, 40);
+            stmt.setInt(3, 20);
+            stmt.setDate(4, new Date(date.getTime()));
+            stmt.setString(5, "xxyy");
+            stmt.execute();
+            stmt.setString(1, "b");
+            stmt.setInt(2, 50);
+            stmt.setInt(3, 30);
+            stmt.setDate(4, new Date(date.getTime()-500));
+            stmt.setString(5, "yyzz");
+            stmt.execute();
+            stmt.setString(1, "c");
+            stmt.setInt(2, 60);
+            stmt.setInt(3, 20);
+            stmt.setDate(4, new Date(date.getTime()-300));
+            stmt.setString(5, "ddee");
+            stmt.execute();
+            conn.commit();
+
+            String query = "SELECT col1+col2, col4, TRUNC(col3, 'HOUR') FROM e_table ORDER BY 1, 2";
+            conn.createStatement().executeQuery(query);
+            fail();
+        } catch (SQLException e) {
+        } finally {
+            conn.close();
+        }
+    }
 }
\ No newline at end of file
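
A detail worth noting from testOrderByWithUnionAll above: the trailing ORDER BY 2 is applied to the combined UNION ALL result, so rows from both x_table and y_table are merged before sorting on the second output column. A sketch restating the tested query (tables as created in the test):

    // Sorts the merged rows of both tables by the second output column
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT a_string, cf2.d FROM x_table UNION ALL SELECT * FROM y_table ORDER BY 2");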

http://git-wip-us.apache.org/repos/asf/phoenix/blob/795debfe/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
index 215f59e..d8e86ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
@@ -29,12 +29,17 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.OrderByExpression;
-import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.parse.ColumnParseNode;
+import org.apache.phoenix.parse.LiteralParseNode;
 import org.apache.phoenix.parse.OrderByNode;
+import org.apache.phoenix.parse.ParseNode;
+import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PInteger;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -77,8 +82,9 @@ public class OrderByCompiler {
      * @throws SQLException
      */
     public static OrderBy compile(StatementContext context,
-                                  FilterableStatement statement,
+                                  SelectStatement statement,
                                   GroupBy groupBy, Integer limit, 
+                                  RowProjector projector,
                                   boolean isInRowKeyOrder) throws SQLException {
         List<OrderByNode> orderByNodes = statement.getOrderBy();
         if (orderByNodes.isEmpty()) {
@@ -91,7 +97,28 @@ public class OrderByCompiler {
         LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
         for (OrderByNode node : orderByNodes) {
             boolean isAscending = node.isAscending();
-            Expression expression = node.getNode().accept(visitor);
+            ParseNode parseNode = node.getNode();
+            Expression expression = null;
+            if (parseNode instanceof LiteralParseNode && ((LiteralParseNode)parseNode).getType() == PInteger.INSTANCE){
+                Integer index = (Integer)((LiteralParseNode)parseNode).getValue();
+                int size = projector.getColumnProjectors().size();
+                if (index > size || index <= 0 ) {
+                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND)
+                    .setMessage("").build().buildException();
+                }
+                ColumnProjector colProj = projector.getColumnProjector(index-1);
+                TableName  tableName = null;
+                if (statement.getSelects().size() > 0 )
+                    tableName = TableName.create(context.getCurrentTable().getTable().getName().toString(), null);
+                else {
+                    tableName =  TableName.create(context.getResolver().getTables().get(0).getTable().getSchemaName().toString(), 
+                            context.getResolver().getTables().get(0).getTable().getTableName().toString());
+                }
+                ColumnParseNode colParseNode = new ColumnParseNode(tableName, colProj.getName(), null);
+                expression = colParseNode.accept(visitor);
+            } else {
+                expression = node.getNode().accept(visitor);
+            }
             if (!expression.isStateless() && visitor.addEntry(expression, isAscending ? SortOrder.ASC : SortOrder.DESC)) {
                 // Detect mix of aggregate and non aggregates (i.e. ORDER BY txns, SUM(txns)
                 if (!visitor.isAggregate()) {
@@ -135,7 +162,6 @@ public class OrderByCompiler {
         return new OrderBy(Lists.newArrayList(orderByExpressions.iterator()));
     }
 
-
     private OrderByCompiler() {
     }
 }
\ No newline at end of file
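
Note the bounds check introduced above: an ordinal outside 1..N, where N is the number of projected columns, is rejected with PARAM_INDEX_OUT_OF_BOUND instead of being compiled; this is also why QueryCompiler (next hunk) now builds the RowProjector before compiling the ORDER BY. A hedged sketch of the client-visible behavior (table name hypothetical):

    try {
        // only two columns are projected, so ordinal 3 is out of range
        conn.createStatement().executeQuery("SELECT k, v FROM foo ORDER BY 3");
    } catch (SQLException e) {
        // expected: e.getErrorCode() == SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND.getErrorCode()
    }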

http://git-wip-us.apache.org/repos/asf/phoenix/blob/795debfe/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 16a7a33..3100664 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -532,8 +532,8 @@ public class QueryCompiler {
         Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode> newHashSet();
         Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
         context.setResolver(resolver); // recover resolver
-        OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, isInRowKeyOrder); 
         RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, asSubquery ? Collections.<PDatum>emptyList() : targetColumns);
+        OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, projector, isInRowKeyOrder); 
         // Final step is to build the query plan
         if (!asSubquery) {
             int maxRows = statement.getMaxRows();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/795debfe/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
index 9e37451..67c44bd 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
@@ -33,6 +33,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
@@ -80,10 +81,15 @@ public class QueryOptimizerTest extends BaseConnectionlessQueryTest {
     @Test
     public void testOrderByDropped() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
-        conn.createStatement().execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true");
-        PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
-        QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY 1,2,3");
-        assertEquals(OrderBy.EMPTY_ORDER_BY,plan.getOrderBy());
+        try{ 
+            conn.createStatement().execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true");
+            PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+            QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY 1,2,3");
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND.getErrorCode(), e.getErrorCode());
+        } finally {
+            conn.close();
+        }
     }
 
     @Test


[31/50] [abbrv] phoenix git commit: PHOENIX-1742 Update pom to Junit 4.12

Posted by ma...@apache.org.
PHOENIX-1742 Update pom to Junit 4.12


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ca4e2124
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ca4e2124
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ca4e2124

Branch: refs/heads/calcite
Commit: ca4e21242a573f4b58fb8959d83ff3b023f057a5
Parents: 5c32d19
Author: Samarth <sa...@salesforce.com>
Authored: Tue Apr 7 18:30:55 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Tue Apr 7 18:30:55 2015 -0700

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca4e2124/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index bfafe78..4793cf2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -108,7 +108,7 @@
 
     <!-- Test Dependencies -->
     <mockito-all.version>1.8.5</mockito-all.version>
-    <junit.version>4.12-beta-3</junit.version>
+    <junit.version>4.12</junit.version>
 
     <!-- Plugin versions -->
     <maven-eclipse-plugin.version>2.9</maven-eclipse-plugin.version>


[12/50] [abbrv] phoenix git commit: PHOENIX-1797 Add more tests for date literals (Mike Friedman)

Posted by ma...@apache.org.
PHOENIX-1797 Add more tests for date literals (Mike Friedman)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0eca5f17
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0eca5f17
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0eca5f17

Branch: refs/heads/calcite
Commit: 0eca5f17f98bf7bf25541f3574256a532747fe6f
Parents: e2cf44c
Author: James Taylor <ja...@apache.org>
Authored: Tue Mar 31 14:12:14 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Tue Mar 31 14:13:40 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DateTimeIT.java  | 360 +++++++++++++++++++
 .../apache/phoenix/parse/QueryParserTest.java   |  18 +
 2 files changed, 378 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0eca5f17/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
new file mode 100644
index 0000000..371d82e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.ATABLE_NAME;
+import static org.apache.phoenix.util.TestUtil.A_VALUE;
+import static org.apache.phoenix.util.TestUtil.B_VALUE;
+import static org.apache.phoenix.util.TestUtil.C_VALUE;
+import static org.apache.phoenix.util.TestUtil.E_VALUE;
+import static org.apache.phoenix.util.TestUtil.MILLIS_IN_DAY;
+import static org.apache.phoenix.util.TestUtil.ROW1;
+import static org.apache.phoenix.util.TestUtil.ROW2;
+import static org.apache.phoenix.util.TestUtil.ROW3;
+import static org.apache.phoenix.util.TestUtil.ROW4;
+import static org.apache.phoenix.util.TestUtil.ROW5;
+import static org.apache.phoenix.util.TestUtil.ROW6;
+import static org.apache.phoenix.util.TestUtil.ROW7;
+import static org.apache.phoenix.util.TestUtil.ROW8;
+import static org.apache.phoenix.util.TestUtil.ROW9;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.Types;
+import java.text.Format;
+import java.util.Calendar;
+
+import org.apache.phoenix.util.DateUtil;
+import org.junit.Test;
+
+
+public class DateTimeIT extends BaseHBaseManagedTimeIT {
+
+    protected Connection conn;
+    protected Date date;
+    protected static final String tenantId = getOrganizationId();
+
+    public DateTimeIT() throws Exception {
+        super();
+        conn = DriverManager.getConnection(getUrl());
+        date = new Date(System.currentTimeMillis());
+        initAtable();
+    }
+
+    protected void initAtable() throws Exception { 
+        ensureTableCreated(getUrl(), ATABLE_NAME, (byte[][])null);
+        PreparedStatement stmt = conn.prepareStatement(
+            "upsert into " + ATABLE_NAME +
+            "(" +
+            "    ORGANIZATION_ID, " +
+            "    ENTITY_ID, " +
+            "    A_STRING, " +
+            "    B_STRING, " +
+            "    A_INTEGER, " +
+            "    A_DATE, " +
+            "    X_DECIMAL, " +
+            "    X_LONG, " +
+            "    X_INTEGER," +
+            "    Y_INTEGER," +
+            "    A_BYTE," +
+            "    A_SHORT," +
+            "    A_FLOAT," +
+            "    A_DOUBLE," +
+            "    A_UNSIGNED_FLOAT," +
+            "    A_UNSIGNED_DOUBLE)" +
+                "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW1);
+        stmt.setString(3, A_VALUE);
+        stmt.setString(4, B_VALUE);
+        stmt.setInt(5, 1);
+        stmt.setDate(6, date);
+        stmt.setBigDecimal(7, null);
+        stmt.setNull(8, Types.BIGINT);
+        stmt.setNull(9, Types.INTEGER);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)1);
+        stmt.setShort(12, (short) 128);
+        stmt.setFloat(13, 0.01f);
+        stmt.setDouble(14, 0.0001);
+        stmt.setFloat(15, 0.01f);
+        stmt.setDouble(16, 0.0001);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW2);
+        stmt.setString(3, A_VALUE);
+        stmt.setString(4, C_VALUE);
+        stmt.setInt(5, 2);
+        stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1));
+        stmt.setBigDecimal(7, null);
+        stmt.setNull(8, Types.BIGINT);
+        stmt.setNull(9, Types.INTEGER);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)2);
+        stmt.setShort(12, (short) 129);
+        stmt.setFloat(13, 0.02f);
+        stmt.setDouble(14, 0.0002);
+        stmt.setFloat(15, 0.02f);
+        stmt.setDouble(16, 0.0002);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW3);
+        stmt.setString(3, A_VALUE);
+        stmt.setString(4, E_VALUE);
+        stmt.setInt(5, 3);
+        stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2));
+        stmt.setBigDecimal(7, null);
+        stmt.setNull(8, Types.BIGINT);
+        stmt.setNull(9, Types.INTEGER);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)3);
+        stmt.setShort(12, (short) 130);
+        stmt.setFloat(13, 0.03f);
+        stmt.setDouble(14, 0.0003);
+        stmt.setFloat(15, 0.03f);
+        stmt.setDouble(16, 0.0003);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW4);
+        stmt.setString(3, A_VALUE);
+        stmt.setString(4, B_VALUE);
+        stmt.setInt(5, 4);
+        stmt.setDate(6, date == null ? null : date);
+        stmt.setBigDecimal(7, null);
+        stmt.setNull(8, Types.BIGINT);
+        stmt.setNull(9, Types.INTEGER);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)4);
+        stmt.setShort(12, (short) 131);
+        stmt.setFloat(13, 0.04f);
+        stmt.setDouble(14, 0.0004);
+        stmt.setFloat(15, 0.04f);
+        stmt.setDouble(16, 0.0004);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW5);
+        stmt.setString(3, B_VALUE);
+        stmt.setString(4, C_VALUE);
+        stmt.setInt(5, 5);
+        stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1));
+        stmt.setBigDecimal(7, null);
+        stmt.setNull(8, Types.BIGINT);
+        stmt.setNull(9, Types.INTEGER);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)5);
+        stmt.setShort(12, (short) 132);
+        stmt.setFloat(13, 0.05f);
+        stmt.setDouble(14, 0.0005);
+        stmt.setFloat(15, 0.05f);
+        stmt.setDouble(16, 0.0005);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW6);
+        stmt.setString(3, B_VALUE);
+        stmt.setString(4, E_VALUE);
+        stmt.setInt(5, 6);
+        stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2));
+        stmt.setBigDecimal(7, null);
+        stmt.setNull(8, Types.BIGINT);
+        stmt.setNull(9, Types.INTEGER);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)6);
+        stmt.setShort(12, (short) 133);
+        stmt.setFloat(13, 0.06f);
+        stmt.setDouble(14, 0.0006);
+        stmt.setFloat(15, 0.06f);
+        stmt.setDouble(16, 0.0006);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW7);
+        stmt.setString(3, B_VALUE);
+        stmt.setString(4, B_VALUE);
+        stmt.setInt(5, 7);
+        stmt.setDate(6, date == null ? null : date);
+        stmt.setBigDecimal(7, BigDecimal.valueOf(0.1));
+        stmt.setLong(8, 5L);
+        stmt.setInt(9, 5);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)7);
+        stmt.setShort(12, (short) 134);
+        stmt.setFloat(13, 0.07f);
+        stmt.setDouble(14, 0.0007);
+        stmt.setFloat(15, 0.07f);
+        stmt.setDouble(16, 0.0007);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW8);
+        stmt.setString(3, B_VALUE);
+        stmt.setString(4, C_VALUE);
+        stmt.setInt(5, 8);
+        stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1));
+        stmt.setBigDecimal(7, BigDecimal.valueOf(3.9));
+        long l = Integer.MIN_VALUE - 1L;
+        assert(l < Integer.MIN_VALUE);
+        stmt.setLong(8, l);
+        stmt.setInt(9, 4);
+        stmt.setNull(10, Types.INTEGER);
+        stmt.setByte(11, (byte)8);
+        stmt.setShort(12, (short) 135);
+        stmt.setFloat(13, 0.08f);
+        stmt.setDouble(14, 0.0008);
+        stmt.setFloat(15, 0.08f);
+        stmt.setDouble(16, 0.0008);
+        stmt.execute();
+
+        stmt.setString(1, tenantId);
+        stmt.setString(2, ROW9);
+        stmt.setString(3, C_VALUE);
+        stmt.setString(4, E_VALUE);
+        stmt.setInt(5, 9);
+        stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2));
+        stmt.setBigDecimal(7, BigDecimal.valueOf(3.3));
+        l = Integer.MAX_VALUE + 1L;
+        assert(l > Integer.MAX_VALUE);
+        stmt.setLong(8, l);
+        stmt.setInt(9, 3);
+        stmt.setInt(10, 300);
+        stmt.setByte(11, (byte)9);
+        stmt.setShort(12, (short) 0);
+        stmt.setFloat(13, 0.09f);
+        stmt.setDouble(14, 0.0009);
+        stmt.setFloat(15, 0.09f);
+        stmt.setDouble(16, 0.0009);
+        stmt.execute();
+
+        conn.commit();
+    }
+
+    @Test
+    public void selectBetweenDates() throws Exception {
+        Format formatter = DateUtil.getDateFormatter("yyyy-MM-dd");
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        java.util.Date dateToday = cal.getTime();
+        cal.add(Calendar.DAY_OF_YEAR, 1);
+        java.util.Date dateTomorrow = cal.getTime();
+        String today = formatter.format(dateToday);
+        String tomorrow = formatter.format(dateTomorrow);
+        String query = "SELECT entity_id FROM ATABLE WHERE a_integer < 4 AND a_date BETWEEN date '" + today + "' AND date '" + tomorrow + "' ";
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals(ROW1, rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testSelectLiteralDate() throws Exception {
+        String s = DateUtil.DEFAULT_DATE_FORMATTER.format(date);
+        String query = "SELECT DATE '" + s + "' FROM ATABLE";
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals(date, rs.getDate(1));
+    }
+
+    @Test
+    public void testSelectLiteralDateCompare() throws Exception {
+        String query = "SELECT (DATE '" + date + "' = DATE '" + date + "') FROM ATABLE";
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertTrue(rs.next());
+        assertTrue(rs.getBoolean(1));
+    }
+
+    @Test
+    public void testSelectWhereDatesEqual() throws Exception {
+        String query = "SELECT entity_id FROM ATABLE WHERE  a_integer < 4 AND DATE '" + date + "' = DATE '" + date + "'";
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertTrue(rs.next());
+
+    }
+
+    @Test
+    public void testSelectWhereDateAndToDateEqual() throws Exception {
+        String query = "SELECT entity_id FROM ATABLE WHERE  a_integer < 4 AND DATE '" + date + "' = TO_DATE ('" + date + "')";
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertTrue(rs.next());
+
+    }
+
+    @Test
+    public void testSelectWhereDateAndTimestampEqual() throws Exception {
+        final String timestamp = "2012-09-08 07:08:23";
+        String query = "SELECT entity_id FROM ATABLE WHERE  a_integer < 4 AND DATE '" + timestamp + "' = TIMESTAMP '" + timestamp + "'";
+
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertTrue(rs.next());
+    }
+
+    @Test
+    public void testSelectWhereSameDatesUnequal() throws Exception {
+        String query = "SELECT entity_id FROM ATABLE WHERE  a_integer < 4 AND DATE '" + date + "' > DATE '" + date + "'";
+        Statement statement = conn.createStatement();
+        ResultSet rs = statement.executeQuery(query);
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testDateInList() throws Exception {
+        String query = "SELECT entity_id FROM ATABLE WHERE a_date IN (?,?) AND a_integer < 4";
+            PreparedStatement statement = conn.prepareStatement(query);
+            statement.setDate(1, new Date(0));
+            statement.setDate(2, date);
+            ResultSet rs = statement.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(ROW1, rs.getString(1));
+            assertFalse(rs.next());
+    }  
+
+    @Test
+    public void testDateBetweenLiterals() throws Exception {
+        Format formatter = DateUtil.getDateFormatter("yyyy-MM-dd");
+        Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        java.util.Date dateToday = cal.getTime();
+        cal.add(Calendar.DAY_OF_YEAR, 1);
+        java.util.Date dateTomorrow = cal.getTime();
+        String today = formatter.format(dateToday);
+        String tomorrow = formatter.format(dateTomorrow);
+        String query = "SELECT entity_id FROM ATABLE WHERE a_integer < 4 AND a_date BETWEEN date '" + today + "' AND date '" + tomorrow + "' ";
+            Statement statement = conn.createStatement();
+            ResultSet rs = statement.executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals(ROW1, rs.getString(1));
+            assertFalse(rs.next());
+    }
+}
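
For reference, these tests compare the two literal forms side by side: DATE '...' is a parse-time literal, while TO_DATE('...') is a built-in function applied to a string. A minimal sketch in the spirit of the tests above (columns and table per ATABLE):

    // Date literal versus TO_DATE over the same string; the tests assert they agree
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT entity_id FROM ATABLE WHERE a_integer < 4 AND DATE '2012-09-08 07:08:23' = TO_DATE('2012-09-08 07:08:23')");
    assertTrue(rs.next());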

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0eca5f17/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index bf599ae..182757f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -749,6 +749,24 @@ public class QueryParserTest {
     }
     
     @Test
+    public void testParseDateEquality() throws Exception {
+        SQLParser parser = new SQLParser(new StringReader(
+            "select a from b\n" +
+            "where date '2014-01-04' = date '2014-01-04'"
+            ));
+        parser.parseStatement();
+    }
+
+    @Test
+    public void testParseDateIn() throws Exception {
+        SQLParser parser = new SQLParser(new StringReader(
+            "select a from b\n" +
+            "where date '2014-01-04' in (date '2014-01-04')"
+            ));
+        parser.parseStatement();
+    }
+    
+    @Test
     public void testUnknownLiteral() throws Exception {
         String sql = (
                 (


[28/50] [abbrv] phoenix git commit: PHOENIX-1818 - Move cluster-required tests to src/it

Posted by ma...@apache.org.
PHOENIX-1818 - Move cluster-required tests to src/it


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f666baa2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f666baa2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f666baa2

Branch: refs/heads/calcite
Commit: f666baa27ed97cb08ba964c53df74907a53ce001
Parents: 9ddb484
Author: ravimagham <ra...@apache.org>
Authored: Tue Apr 7 00:19:21 2015 -0700
Committer: ravimagham <ra...@apache.org>
Committed: Tue Apr 7 00:19:21 2015 -0700

----------------------------------------------------------------------
 phoenix-spark/src/it/resources/log4j.xml        |  41 +++
 phoenix-spark/src/it/resources/setup.sql        |  18 +
 .../apache/phoenix/spark/PhoenixRDDTest.scala   | 333 +++++++++++++++++++
 phoenix-spark/src/test/resources/log4j.xml      |  41 ---
 phoenix-spark/src/test/resources/setup.sql      |  18 -
 .../apache/phoenix/spark/PhoenixRDDTest.scala   | 333 -------------------
 6 files changed, 392 insertions(+), 392 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f666baa2/phoenix-spark/src/it/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/log4j.xml b/phoenix-spark/src/it/resources/log4j.xml
new file mode 100644
index 0000000..d4799da
--- /dev/null
+++ b/phoenix-spark/src/it/resources/log4j.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+  <appender name="console" class="org.apache.log4j.ConsoleAppender">
+    <param name="Target" value="System.out"/>
+
+    <layout class="org.apache.log4j.PatternLayout">
+      <param name="ConversionPattern" value="%-4r [%t] %-5p %c %x - %m%n"/>
+    </layout>
+  </appender>
+
+  <logger name="org.eclipse">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="org.apache">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name = "org.apache.phoenix.mapreduce">
+    <level value="FATAL"/>
+  </logger>
+
+  <logger name="org.mortbay">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="BlockStateChange">
+    <level value="ERROR"/>
+  </logger>
+
+  <logger name="io.netty">
+    <level value="ERROR"/>
+  </logger>
+
+  <root>
+    <priority value="INFO"/>
+    <appender-ref ref="console"/>
+  </root>
+</log4j:configuration>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f666baa2/phoenix-spark/src/it/resources/setup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/resources/setup.sql b/phoenix-spark/src/it/resources/setup.sql
new file mode 100644
index 0000000..14a7e7e
--- /dev/null
+++ b/phoenix-spark/src/it/resources/setup.sql
@@ -0,0 +1,18 @@
+CREATE TABLE table1 (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
+CREATE TABLE table2 (id BIGINT NOT NULL PRIMARY KEY, table1_id BIGINT, "t2col1" VARCHAR)
+UPSERT INTO table1 (id, col1) VALUES (1, 'test_row_1')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (1, 1, 'test_child_1')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (2, 1, 'test_child_2')
+UPSERT INTO table1 (id, col1) VALUES (2, 'test_row_2')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (3, 2, 'test_child_1')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (4, 2, 'test_child_2')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (5, 2, 'test_child_3')
+UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (6, 2, 'test_child_4')
+CREATE TABLE "table3" ("id" BIGINT NOT NULL PRIMARY KEY, "col1" VARCHAR)
+UPSERT INTO "table3" ("id", "col1") VALUES (1, 'foo')
+UPSERT INTO "table3" ("id", "col1") VALUES (2, 'bar')
+CREATE TABLE ARRAY_TEST_TABLE (ID BIGINT NOT NULL PRIMARY KEY, VCARRAY VARCHAR[])
+UPSERT INTO ARRAY_TEST_TABLE (ID, VCARRAY) VALUES (1, ARRAY['String1', 'String2', 'String3'])
+CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMESTAMP NOT NULL CONSTRAINT pk PRIMARY KEY (ID, TIMESERIES_KEY))
+UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
+CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
\ No newline at end of file
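
One convention the setup script depends on: double-quoted identifiers such as "table3" and "t2col1" are case-sensitive in Phoenix, while unquoted identifiers are normalized to upper case. A small client-side sketch of the consequence (hypothetical connection, in the spirit of the tests below):

    // Unquoted table3 would normalize to TABLE3 and not match; quoting preserves the lower case
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT \"col1\" FROM \"table3\"");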

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f666baa2/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
new file mode 100644
index 0000000..63cb6e4
--- /dev/null
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
@@ -0,0 +1,333 @@
+/*
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ */
+package org.apache.phoenix.spark
+
+import java.sql.{Connection, DriverManager}
+import java.util.Date
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hbase.{HConstants, HBaseTestingUtility}
+import org.apache.phoenix.schema.ColumnNotFoundException
+import org.apache.phoenix.schema.types.PVarchar
+import org.apache.phoenix.util.ColumnInfo
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.types.{StringType, StructField}
+import org.apache.spark.{SparkConf, SparkContext}
+import org.joda.time.DateTime
+import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
+import org.apache.phoenix.spark._
+
+import scala.collection.mutable.ListBuffer
+
+class PhoenixRDDTest extends FunSuite with Matchers with BeforeAndAfterAll {
+  lazy val hbaseTestingUtility = {
+    new HBaseTestingUtility()
+  }
+
+  lazy val hbaseConfiguration = {
+    val conf = hbaseTestingUtility.getConfiguration
+
+    val quorum = conf.get("hbase.zookeeper.quorum")
+    val clientPort = conf.get("hbase.zookeeper.property.clientPort")
+    val znodeParent = conf.get("zookeeper.znode.parent")
+
+    // This is an odd one - the Zookeeper Quorum entry in the config is totally wrong. It's
+    // just reporting localhost.
+    conf.set(org.apache.hadoop.hbase.HConstants.ZOOKEEPER_QUORUM, s"$quorum:$clientPort:$znodeParent")
+
+    conf
+  }
+
+  lazy val quorumAddress = {
+    hbaseConfiguration.get("hbase.zookeeper.quorum")
+  }
+
+  lazy val zookeeperClientPort = {
+    hbaseConfiguration.get("hbase.zookeeper.property.clientPort")
+  }
+
+  lazy val zookeeperZnodeParent = {
+    hbaseConfiguration.get("zookeeper.znode.parent")
+  }
+
+  lazy val hbaseConnectionString = {
+    s"$quorumAddress:$zookeeperClientPort:$zookeeperZnodeParent"
+  }
+
+  var conn: Connection = _
+
+  override def beforeAll() {
+    hbaseTestingUtility.startMiniCluster()
+
+    conn = DriverManager.getConnection(s"jdbc:phoenix:$hbaseConnectionString")
+
+    conn.setAutoCommit(true)
+
+    // each SQL statement used to set up Phoenix must be on a single line. Yes, that
+    // can potentially make large lines.
+    val setupSqlSource = getClass.getClassLoader.getResourceAsStream("setup.sql")
+
+    val setupSql = scala.io.Source.fromInputStream(setupSqlSource).getLines()
+
+    for (sql <- setupSql) {
+      val stmt = conn.createStatement()
+
+      stmt.execute(sql)
+
+      stmt.close()
+    }
+
+    conn.commit()
+  }
+
+  override def afterAll() {
+    conn.close()
+    hbaseTestingUtility.shutdownMiniCluster()
+  }
+
+  val conf = new SparkConf().set("spark.ui.showConsoleProgress", "false")
+
+  val sc = new SparkContext("local[1]", "PhoenixSparkTest", conf)
+
+  def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
+    val query = "SELECT %s FROM \"%s\"" format(columns.map(f => "\"" + f + "\"").mkString(", "), table)
+
+    query + (predicate match {
+      case Some(p: String) => " WHERE " + p
+      case _ => ""
+    })
+  }
+
+  test("Can create valid SQL") {
+    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
+      conf = hbaseConfiguration)
+
+    rdd.buildSql("MyTable", Array("Foo", "Bar"), None) should
+      equal("SELECT \"Foo\", \"Bar\" FROM \"MyTable\"")
+  }
+
+  test("Can convert Phoenix schema") {
+    val phoenixSchema = List(
+      new ColumnInfo("varcharColumn", PVarchar.INSTANCE.getSqlType)
+    )
+
+    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
+      conf = hbaseConfiguration)
+
+    val catalystSchema = rdd.phoenixSchemaToCatalystSchema(phoenixSchema)
+
+    val expected = List(StructField("varcharColumn", StringType, nullable = true))
+
+    catalystSchema shouldEqual expected
+  }
+
+  test("Can create schema RDD and execute query") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+
+    df1.registerTempTable("sql_table_1")
+
+    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
+      conf = hbaseConfiguration)
+
+    df2.registerTempTable("sql_table_2")
+
+    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 6L
+  }
+
+  test("Can create schema RDD and execute query on case sensitive table (no config)") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"), zkUrl = Some(hbaseConnectionString))
+
+    df1.registerTempTable("table3")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 2L
+  }
+
+  test("Can create schema RDD and execute constrained query") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
+
+    df1.registerTempTable("sql_table_1")
+
+    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
+      predicate = Some("\"ID\" = 1"),
+      conf = hbaseConfiguration)
+
+    df2.registerTempTable("sql_table_2")
+
+    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 1L
+  }
+
+  test("Using a predicate referring to a non-existent column should fail") {
+    intercept[RuntimeException] {
+      val sqlContext = new SQLContext(sc)
+
+      val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+        predicate = Some("foo = bar"),
+        conf = hbaseConfiguration)
+
+      df1.registerTempTable("table3")
+
+      val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+      // we have to execute an action before the predicate failure can occur
+      val count = sqlRdd.count()
+    }.getCause shouldBe a [ColumnNotFoundException]
+  }
+
+  test("Can create schema RDD with predicate that will never match") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
+      predicate = Some("\"id\" = -1"),
+      conf = hbaseConfiguration)
+
+    df1.registerTempTable("table3")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 0L
+  }
+
+  test("Can create schema RDD with complex predicate") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("DATE_PREDICATE_TEST_TABLE", Array("ID", "TIMESERIES_KEY"),
+      predicate = Some("ID > 0 AND TIMESERIES_KEY BETWEEN CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)"),
+      conf = hbaseConfiguration)
+    
+    df1.registerTempTable("date_predicate_test_table")
+
+    val sqlRdd = df1.sqlContext.sql("SELECT * FROM date_predicate_test_table")
+
+    val count = sqlRdd.count()
+
+    count shouldEqual 0L
+  }
+
+  test("Can query an array table") {
+    val sqlContext = new SQLContext(sc)
+
+    val df1 = sqlContext.phoenixTableAsDataFrame("ARRAY_TEST_TABLE", Array("ID", "VCARRAY"),
+      conf = hbaseConfiguration)
+
+    df1.registerTempTable("ARRAY_TEST_TABLE")
+
+    val sqlRdd = sqlContext.sql("SELECT * FROM ARRAY_TEST_TABLE")
+
+    val count = sqlRdd.count()
+
+    // get row 0, column 1, which should be "VCARRAY"
+    val arrayValues = sqlRdd.collect().apply(0).apply(1)
+
+    arrayValues should equal(Array("String1", "String2", "String3"))
+
+    count shouldEqual 1L
+  }
+  
+  test("Can read a table as an RDD") {
+    val rdd1 = sc.phoenixTableAsRDD("ARRAY_TEST_TABLE", Seq("ID", "VCARRAY"),
+      conf = hbaseConfiguration)
+
+    val count = rdd1.count()
+
+    val arrayValues = rdd1.take(1)(0)("VCARRAY")
+
+    arrayValues should equal(Array("String1", "String2", "String3"))
+
+    count shouldEqual 1L
+  }
+
+  test("Can save to phoenix table") {
+    val sqlContext = new SQLContext(sc)
+
+    val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
+
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "OUTPUT_TEST_TABLE",
+        Seq("ID","COL1","COL2"),
+        hbaseConfiguration
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT ID, COL1, COL2 FROM OUTPUT_TEST_TABLE")
+    val results = ListBuffer[(Long, String, Int)]()
+    while(rs.next()) {
+      results.append((rs.getLong(1), rs.getString(2), rs.getInt(3)))
+    }
+    stmt.close()
+
+    // Verify they match
+    (0 to results.size - 1).foreach { i =>
+      dataSet(i) shouldEqual results(i)
+    }
+  }
+
+  test("Can save Java and Joda dates to Phoenix (no config)") {
+    val dt = new DateTime()
+    val date = new Date()
+
+    val dataSet = List((1L, "1", 1, dt), (2L, "2", 2, date))
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "OUTPUT_TEST_TABLE",
+        Seq("ID","COL1","COL2","COL3"),
+        zkUrl = Some(hbaseConnectionString)
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT COL3 FROM OUTPUT_TEST_TABLE WHERE ID = 1 OR ID = 2 ORDER BY ID ASC")
+    val results = ListBuffer[java.sql.Date]()
+    while(rs.next()) {
+      results.append(rs.getDate(1))
+    }
+    stmt.close()
+
+    // Verify the epochs are equal
+    results(0).getTime shouldEqual dt.getMillis
+    results(1).getTime shouldEqual date.getTime
+  }
+
+  test("Not specifying a zkUrl or a config quorum URL should fail") {
+    intercept[UnsupportedOperationException] {
+      val sqlContext = new SQLContext(sc)
+      val badConf = new Configuration(hbaseConfiguration)
+      badConf.unset(HConstants.ZOOKEEPER_QUORUM)
+      sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = badConf)
+    }
+  }
+}
\ No newline at end of file

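For context, the new test exercises the implicit helpers that phoenix-spark adds to SparkContext and SQLContext. Below is a minimal sketch of that API; the table, columns, and ZooKeeper URL are placeholders rather than values from this commit, and the zkUrl string follows the same quorum:clientPort:znodeParent form the test builds.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext
    import org.apache.phoenix.spark._ // brings the implicit conversions into scope

    object PhoenixSparkSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext("local[1]", "PhoenixSparkSketch",
          new SparkConf().set("spark.ui.showConsoleProgress", "false"))
        val sqlContext = new SQLContext(sc)
        val zkUrl = Some("localhost:2181:/hbase") // placeholder connection string

        // Read a Phoenix table as a DataFrame and query it through Spark SQL.
        val df = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), zkUrl = zkUrl)
        df.registerTempTable("table1")
        sqlContext.sql("SELECT COUNT(*) FROM table1").collect().foreach(println)

        // Write an RDD of tuples back, matching the target column order.
        sc.parallelize(Seq((10L, "ten"))).saveToPhoenix("TABLE1", Seq("ID", "COL1"), zkUrl = zkUrl)

        sc.stop()
      }
    }
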
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f666baa2/phoenix-spark/src/test/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/test/resources/log4j.xml b/phoenix-spark/src/test/resources/log4j.xml
deleted file mode 100644
index d4799da..0000000
--- a/phoenix-spark/src/test/resources/log4j.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-  <appender name="console" class="org.apache.log4j.ConsoleAppender">
-    <param name="Target" value="System.out"/>
-
-    <layout class="org.apache.log4j.PatternLayout">
-      <param name="ConversionPattern" value="%-4r [%t] %-5p %c %x - %m%n"/>
-    </layout>
-  </appender>
-
-  <logger name="org.eclipse">
-    <level value="ERROR"/>
-  </logger>
-
-  <logger name="org.apache">
-    <level value="ERROR"/>
-  </logger>
-
-  <logger name = "org.apache.phoenix.mapreduce">
-    <level value="FATAL"/>
-  </logger>
-
-  <logger name="org.mortbay">
-    <level value="ERROR"/>
-  </logger>
-
-  <logger name="BlockStateChange">
-    <level value="ERROR"/>
-  </logger>
-
-  <logger name="io.netty">
-    <level value="ERROR"/>
-  </logger>
-
-  <root>
-    <priority value="INFO"/>
-    <appender-ref ref="console"/>
-  </root>
-</log4j:configuration>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f666baa2/phoenix-spark/src/test/resources/setup.sql
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/test/resources/setup.sql b/phoenix-spark/src/test/resources/setup.sql
deleted file mode 100644
index 14a7e7e..0000000
--- a/phoenix-spark/src/test/resources/setup.sql
+++ /dev/null
@@ -1,18 +0,0 @@
-CREATE TABLE table1 (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR)
-CREATE TABLE table2 (id BIGINT NOT NULL PRIMARY KEY, table1_id BIGINT, "t2col1" VARCHAR)
-UPSERT INTO table1 (id, col1) VALUES (1, 'test_row_1')
-UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (1, 1, 'test_child_1')
-UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (2, 1, 'test_child_2')
-UPSERT INTO table1 (id, col1) VALUES (2, 'test_row_2')
-UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (3, 2, 'test_child_1')
-UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (4, 2, 'test_child_2')
-UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (5, 2, 'test_child_3')
-UPSERT INTO table2 (id, table1_id, "t2col1") VALUES (6, 2, 'test_child_4')
-CREATE TABLE "table3" ("id" BIGINT NOT NULL PRIMARY KEY, "col1" VARCHAR)
-UPSERT INTO "table3" ("id", "col1") VALUES (1, 'foo')
-UPSERT INTO "table3" ("id", "col1") VALUES (2, 'bar')
-CREATE TABLE ARRAY_TEST_TABLE (ID BIGINT NOT NULL PRIMARY KEY, VCARRAY VARCHAR[])
-UPSERT INTO ARRAY_TEST_TABLE (ID, VCARRAY) VALUES (1, ARRAY['String1', 'String2', 'String3'])
-CREATE TABLE DATE_PREDICATE_TEST_TABLE (ID BIGINT NOT NULL, TIMESERIES_KEY TIMESTAMP NOT NULL CONSTRAINT pk PRIMARY KEY (ID, TIMESERIES_KEY))
-UPSERT INTO DATE_PREDICATE_TEST_TABLE (ID, TIMESERIES_KEY) VALUES (1, CAST(CURRENT_TIME() AS TIMESTAMP))
-CREATE TABLE OUTPUT_TEST_TABLE (id BIGINT NOT NULL PRIMARY KEY, col1 VARCHAR, col2 INTEGER, col3 DATE)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f666baa2/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala b/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
deleted file mode 100644
index 63cb6e4..0000000
--- a/phoenix-spark/src/test/scala/org/apache/phoenix/spark/PhoenixRDDTest.scala
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
- */
-package org.apache.phoenix.spark
-
-import java.sql.{Connection, DriverManager}
-import java.util.Date
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hbase.{HConstants, HBaseTestingUtility}
-import org.apache.phoenix.schema.ColumnNotFoundException
-import org.apache.phoenix.schema.types.PVarchar
-import org.apache.phoenix.util.ColumnInfo
-import org.apache.spark.sql.SQLContext
-import org.apache.spark.sql.types.{StringType, StructField}
-import org.apache.spark.{SparkConf, SparkContext}
-import org.joda.time.DateTime
-import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
-import org.apache.phoenix.spark._
-
-import scala.collection.mutable.ListBuffer
-
-class PhoenixRDDTest extends FunSuite with Matchers with BeforeAndAfterAll {
-  lazy val hbaseTestingUtility = {
-    new HBaseTestingUtility()
-  }
-
-  lazy val hbaseConfiguration = {
-    val conf = hbaseTestingUtility.getConfiguration
-
-    val quorum = conf.get("hbase.zookeeper.quorum")
-    val clientPort = conf.get("hbase.zookeeper.property.clientPort")
-    val znodeParent = conf.get("zookeeper.znode.parent")
-
-    // This is an odd one - the Zookeeper Quorum entry in the config is totally wrong. It's
-    // just reporting localhost.
-    conf.set(org.apache.hadoop.hbase.HConstants.ZOOKEEPER_QUORUM, s"$quorum:$clientPort:$znodeParent")
-
-    conf
-  }
-
-  lazy val quorumAddress = {
-    hbaseConfiguration.get("hbase.zookeeper.quorum")
-  }
-
-  lazy val zookeeperClientPort = {
-    hbaseConfiguration.get("hbase.zookeeper.property.clientPort")
-  }
-
-  lazy val zookeeperZnodeParent = {
-    hbaseConfiguration.get("zookeeper.znode.parent")
-  }
-
-  lazy val hbaseConnectionString = {
-    s"$quorumAddress:$zookeeperClientPort:$zookeeperZnodeParent"
-  }
-
-  var conn: Connection = _
-
-  override def beforeAll() {
-    hbaseTestingUtility.startMiniCluster()
-
-    conn = DriverManager.getConnection(s"jdbc:phoenix:$hbaseConnectionString")
-
-    conn.setAutoCommit(true)
-
-    // each SQL statement used to set up Phoenix must be on a single line. Yes, that
-    // can potentially make large lines.
-    val setupSqlSource = getClass.getClassLoader.getResourceAsStream("setup.sql")
-
-    val setupSql = scala.io.Source.fromInputStream(setupSqlSource).getLines()
-
-    for (sql <- setupSql) {
-      val stmt = conn.createStatement()
-
-      stmt.execute(sql)
-
-      stmt.close()
-    }
-
-    conn.commit()
-  }
-
-  override def afterAll() {
-    conn.close()
-    hbaseTestingUtility.shutdownMiniCluster()
-  }
-
-  val conf = new SparkConf().set("spark.ui.showConsoleProgress", "false")
-
-  val sc = new SparkContext("local[1]", "PhoenixSparkTest", conf)
-
-  def buildSql(table: String, columns: Seq[String], predicate: Option[String]): String = {
-    val query = "SELECT %s FROM \"%s\"" format(columns.map(f => "\"" + f + "\"").mkString(", "), table)
-
-    query + (predicate match {
-      case Some(p: String) => " WHERE " + p
-      case _ => ""
-    })
-  }
-
-  test("Can create valid SQL") {
-    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
-      conf = hbaseConfiguration)
-
-    rdd.buildSql("MyTable", Array("Foo", "Bar"), None) should
-      equal("SELECT \"Foo\", \"Bar\" FROM \"MyTable\"")
-  }
-
-  test("Can convert Phoenix schema") {
-    val phoenixSchema = List(
-      new ColumnInfo("varcharColumn", PVarchar.INSTANCE.getSqlType)
-    )
-
-    val rdd = new PhoenixRDD(sc, "MyTable", Array("Foo", "Bar"),
-      conf = hbaseConfiguration)
-
-    val catalystSchema = rdd.phoenixSchemaToCatalystSchema(phoenixSchema)
-
-    val expected = List(StructField("varcharColumn", StringType, nullable = true))
-
-    catalystSchema shouldEqual expected
-  }
-
-  test("Can create schema RDD and execute query") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
-
-    df1.registerTempTable("sql_table_1")
-
-    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
-      conf = hbaseConfiguration)
-
-    df2.registerTempTable("sql_table_2")
-
-    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 6L
-  }
-
-  test("Can create schema RDD and execute query on case sensitive table (no config)") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"), zkUrl = Some(hbaseConnectionString))
-
-    df1.registerTempTable("table3")
-
-    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 2L
-  }
-
-  test("Can create schema RDD and execute constrained query") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
-
-    df1.registerTempTable("sql_table_1")
-
-    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
-      predicate = Some("\"ID\" = 1"),
-      conf = hbaseConfiguration)
-
-    df2.registerTempTable("sql_table_2")
-
-    val sqlRdd = sqlContext.sql("SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1 INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 1L
-  }
-
-  test("Using a predicate referring to a non-existent column should fail") {
-    intercept[RuntimeException] {
-      val sqlContext = new SQLContext(sc)
-
-      val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
-        predicate = Some("foo = bar"),
-        conf = hbaseConfiguration)
-
-      df1.registerTempTable("table3")
-
-      val sqlRdd = sqlContext.sql("SELECT * FROM table3")
-
-      // we have to execute an action before the predicate failure can occur
-      val count = sqlRdd.count()
-    }.getCause shouldBe a [ColumnNotFoundException]
-  }
-
-  test("Can create schema RDD with predicate that will never match") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("table3", Array("id", "col1"),
-      predicate = Some("\"id\" = -1"),
-      conf = hbaseConfiguration)
-
-    df1.registerTempTable("table3")
-
-    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 0L
-  }
-
-  test("Can create schema RDD with complex predicate") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("DATE_PREDICATE_TEST_TABLE", Array("ID", "TIMESERIES_KEY"),
-      predicate = Some("ID > 0 AND TIMESERIES_KEY BETWEEN CAST(TO_DATE('1990-01-01 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP) AND CAST(TO_DATE('1990-01-30 00:00:01', 'yyyy-MM-dd HH:mm:ss') AS TIMESTAMP)"),
-      conf = hbaseConfiguration)
-    
-    df1.registerTempTable("date_predicate_test_table")
-
-    val sqlRdd = df1.sqlContext.sql("SELECT * FROM date_predicate_test_table")
-
-    val count = sqlRdd.count()
-
-    count shouldEqual 0L
-  }
-
-  test("Can query an array table") {
-    val sqlContext = new SQLContext(sc)
-
-    val df1 = sqlContext.phoenixTableAsDataFrame("ARRAY_TEST_TABLE", Array("ID", "VCARRAY"),
-      conf = hbaseConfiguration)
-
-    df1.registerTempTable("ARRAY_TEST_TABLE")
-
-    val sqlRdd = sqlContext.sql("SELECT * FROM ARRAY_TEST_TABLE")
-
-    val count = sqlRdd.count()
-
-    // get row 0, column 1, which should be "VCARRAY"
-    val arrayValues = sqlRdd.collect().apply(0).apply(1)
-
-    arrayValues should equal(Array("String1", "String2", "String3"))
-
-    count shouldEqual 1L
-  }
-  
-  test("Can read a table as an RDD") {
-    val rdd1 = sc.phoenixTableAsRDD("ARRAY_TEST_TABLE", Seq("ID", "VCARRAY"),
-      conf = hbaseConfiguration)
-
-    val count = rdd1.count()
-
-    val arrayValues = rdd1.take(1)(0)("VCARRAY")
-
-    arrayValues should equal(Array("String1", "String2", "String3"))
-
-    count shouldEqual 1L
-  }
-
-  test("Can save to phoenix table") {
-    val sqlContext = new SQLContext(sc)
-
-    val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
-
-    sc
-      .parallelize(dataSet)
-      .saveToPhoenix(
-        "OUTPUT_TEST_TABLE",
-        Seq("ID","COL1","COL2"),
-        hbaseConfiguration
-      )
-
-    // Load the results back
-    val stmt = conn.createStatement()
-    val rs = stmt.executeQuery("SELECT ID, COL1, COL2 FROM OUTPUT_TEST_TABLE")
-    val results = ListBuffer[(Long, String, Int)]()
-    while(rs.next()) {
-      results.append((rs.getLong(1), rs.getString(2), rs.getInt(3)))
-    }
-    stmt.close()
-
-    // Verify they match
-    (0 to results.size - 1).foreach { i =>
-      dataSet(i) shouldEqual results(i)
-    }
-  }
-
-  test("Can save Java and Joda dates to Phoenix (no config)") {
-    val dt = new DateTime()
-    val date = new Date()
-
-    val dataSet = List((1L, "1", 1, dt), (2L, "2", 2, date))
-    sc
-      .parallelize(dataSet)
-      .saveToPhoenix(
-        "OUTPUT_TEST_TABLE",
-        Seq("ID","COL1","COL2","COL3"),
-        zkUrl = Some(hbaseConnectionString)
-      )
-
-    // Load the results back
-    val stmt = conn.createStatement()
-    val rs = stmt.executeQuery("SELECT COL3 FROM OUTPUT_TEST_TABLE WHERE ID = 1 OR ID = 2 ORDER BY ID ASC")
-    val results = ListBuffer[java.sql.Date]()
-    while(rs.next()) {
-      results.append(rs.getDate(1))
-    }
-    stmt.close()
-
-    // Verify the epochs are equal
-    results(0).getTime shouldEqual dt.getMillis
-    results(1).getTime shouldEqual date.getTime
-  }
-
-  test("Not specifying a zkUrl or a config quorum URL should fail") {
-    intercept[UnsupportedOperationException] {
-      val sqlContext = new SQLContext(sc)
-      val badConf = new Configuration(hbaseConfiguration)
-      badConf.unset(HConstants.ZOOKEEPER_QUORUM)
-      sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = badConf)
-    }
-  }
-}
\ No newline at end of file


[04/50] [abbrv] phoenix git commit: Fix IndexExpressionIT test failures

Posted by ma...@apache.org.
Fix IndexExpressionIT test failures


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b256fde0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b256fde0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b256fde0

Branch: refs/heads/calcite
Commit: b256fde0eab4813372c085491fdb845336dd0074
Parents: 1a13784
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Thu Mar 26 12:24:46 2015 -0700
Committer: Thomas D'Silva <tw...@gmail.com>
Committed: Thu Mar 26 12:24:46 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/end2end/index/IndexExpressionIT.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b256fde0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 0203e35..1a5fbcc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -480,7 +480,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             String expectedPlan = "CLIENT PARALLEL 1-WAY "
                     + (localIndex ? "RANGE SCAN OVER _LOCAL_IDX_" + fullDataTableName + " [-32768]"
                             : "FULL SCAN OVER INDEX_TEST.IDX")
-                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + B.INT_COL2))]\nCLIENT MERGE SORT";
+                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT(\"(A.INT_COL1 + B.INT_COL2)\")]\nCLIENT MERGE SORT";
             assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
             rs = conn.createStatement().executeQuery(groupBySql);
             assertTrue(rs.next());
@@ -531,7 +531,7 @@ public class IndexExpressionIT extends BaseHBaseManagedTimeIT {
             String expectedPlan = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
                     + (localIndex ? "_LOCAL_IDX_" + fullDataTableName + " [-32768,0] - [-32768,*]"
                             : "INDEX_TEST.IDX [0] - [*]")
-                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + 1))]\nCLIENT MERGE SORT";
+                    + "\n    SERVER FILTER BY FIRST KEY ONLY\n    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT(\"(A.INT_COL1 + 1)\")]\nCLIENT MERGE SORT";
             assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
             rs = conn.createStatement().executeQuery(sql);
             assertTrue(rs.next());

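The fix updates the expected EXPLAIN output: expressions materialized in an index now render quoted inside TO_BIGINT(...). For reference, plans like these are obtained by running EXPLAIN through JDBC; a rough sketch, assuming an open Phoenix connection `conn` and an illustrative query:

    // Roughly what QueryUtil.getExplainPlan does with the ResultSet of an
    // EXPLAIN statement.
    val rs = conn.createStatement().executeQuery(
      "EXPLAIN SELECT COUNT(*) FROM table1 GROUP BY id")
    val plan = new StringBuilder
    while (rs.next()) plan.append(rs.getString(1)).append('\n')
    println(plan)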

[08/50] [abbrv] phoenix git commit: PHOENIX-1783 Fix IDE compiler errors for JodaTimezoneCacheTest

Posted by ma...@apache.org.
PHOENIX-1783 Fix IDE compiler errors for JodaTimezoneCacheTest


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2c0ed104
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2c0ed104
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2c0ed104

Branch: refs/heads/calcite
Commit: 2c0ed1045c84375a09e21e6f39ff96b48fd4e519
Parents: b9002b7
Author: James Taylor <ja...@apache.org>
Authored: Fri Mar 27 16:29:00 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Fri Mar 27 16:29:00 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/cache/JodaTimezoneCacheTest.java   | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c0ed104/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
index f388703..e9b6c67 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java
@@ -15,12 +15,14 @@
  */
 package org.apache.phoenix.cache;
 
+import static org.junit.Assert.assertNotNull;
+
 import java.nio.ByteBuffer;
+
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.schema.IllegalDataException;
 import org.joda.time.DateTimeZone;
-import static org.junit.Assert.assertTrue;
 import org.junit.Test;
 
 public class JodaTimezoneCacheTest {
@@ -28,13 +30,13 @@ public class JodaTimezoneCacheTest {
     @Test
     public void testGetInstanceByteBufferUTC() {
         DateTimeZone instance = JodaTimezoneCache.getInstance(ByteBuffer.wrap(Bytes.toBytes("UTC")));
-        assertTrue(instance instanceof DateTimeZone);
+        assertNotNull(instance);
     }
 
     @Test
     public void testGetInstanceString() {
         DateTimeZone instance = JodaTimezoneCache.getInstance("America/St_Vincent");
-        assertTrue(instance instanceof DateTimeZone);
+        assertNotNull(instance);
     }
 
     @Test(expected = IllegalDataException.class)
@@ -46,6 +48,6 @@ public class JodaTimezoneCacheTest {
     public void testGetInstanceImmutableBytesWritable() {
         ImmutableBytesWritable ptr = new ImmutableBytesWritable(Bytes.toBytes("Europe/Isle_of_Man"));
         DateTimeZone instance = JodaTimezoneCache.getInstance(ptr);
-        assertTrue(instance instanceof DateTimeZone);
+        assertNotNull(instance);
     }
 }

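The rationale for the assertion change: each `instance` is statically typed as DateTimeZone, so `instance instanceof DateTimeZone` can only ever distinguish null from non-null, and assertNotNull states that intent directly. The same idea in Scala:

    import org.joda.time.DateTimeZone

    val instance: DateTimeZone = DateTimeZone.forID("America/St_Vincent")
    // For a reference already typed as DateTimeZone, an instanceof test adds
    // nothing beyond a null check, which is all the updated tests assert.
    assert(instance != null)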

[09/50] [abbrv] phoenix git commit: PHOENIX-1790 Fix test failures due to incorrect shadowing of @AfterClass methods.

Posted by ma...@apache.org.
PHOENIX-1790 Fix test failures due to incorrect shadowing of @AfterClass methods.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2bf8c678
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2bf8c678
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2bf8c678

Branch: refs/heads/calcite
Commit: 2bf8c6788efb6dad7513f7bb14d2e9d75d7b50e3
Parents: 2c0ed10
Author: Samarth <sa...@salesforce.com>
Authored: Mon Mar 30 09:01:14 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Mon Mar 30 09:01:14 2015 -0700

----------------------------------------------------------------------
 .../src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java     | 2 +-
 .../src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bf8c678/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
index c079a30..deb14db 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
@@ -57,7 +57,7 @@ public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
     
     @AfterClass
-    public static void doTeardown() throws Exception {
+    public static void cleanUpAfterTestSuite() throws Exception {
         TestPhoenixIndexRpcSchedulerFactory.reset();
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bf8c678/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index de0ab84..b04f636 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -76,7 +76,7 @@ public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
     
     @AfterClass
-    public static void doTeardown() throws Exception {
+    public static void cleanUpAfterTestSuite() throws Exception {
         TestPhoenixIndexRpcSchedulerFactory.reset();
     }
     


[10/50] [abbrv] phoenix git commit: PHOENIX-1792 Add Week() and Hour() built-ins (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1792 Add Week() and Hour() built-ins (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d05d7c86
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d05d7c86
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d05d7c86

Branch: refs/heads/calcite
Commit: d05d7c86c1de64a88cf8fb7ff119b1e1208f9929
Parents: 2bf8c67
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Tue Mar 31 14:15:09 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Tue Mar 31 14:15:09 2015 +0530

----------------------------------------------------------------------
 .../end2end/YearMonthSecondFunctionIT.java      | 36 +++++++++
 .../phoenix/expression/ExpressionType.java      |  6 +-
 .../expression/function/HourFunction.java       | 81 +++++++++++++++++++
 .../expression/function/WeekFunction.java       | 83 ++++++++++++++++++++
 4 files changed, 205 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d05d7c86/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
index da745fe..3742a17 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/YearMonthSecondFunctionIT.java
@@ -168,4 +168,40 @@ public class YearMonthSecondFunctionIT extends BaseHBaseManagedTimeIT {
         assertEquals(50, rs.getInt(6));
         assertFalse(rs.next());
     }
+
+    @Test
+    public void testWeekFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('2004-03-01 00:00:10'), TO_TIMESTAMP('2006-04-12 00:00:20'), TO_TIME('2008-05-16 10:00:30'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, WEEK(dates), WEEK(times) FROM T1 where WEEK(timestamps)=15");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(9, rs.getInt(2));
+        assertEquals(20, rs.getInt(3));
+        assertFalse(rs.next());
+    }
+    
+    @Test
+    public void testHourFuncAgainstColumns() throws Exception {
+        String ddl =
+                "CREATE TABLE IF NOT EXISTS T1 (k1 INTEGER NOT NULL, dates DATE, timestamps TIMESTAMP, times TIME CONSTRAINT pk PRIMARY KEY (k1))";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO T1 VALUES (1, TO_DATE('Sat, 3 Feb 2008 03:05:06 GMT', 'EEE, d MMM yyyy HH:mm:ss z', 'UTC'), TO_TIMESTAMP('2006-04-12 15:10:20'), " +
+                "TO_TIME('2008-05-16 20:40:30'))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("SELECT k1, HOUR(dates), HOUR(timestamps), HOUR(times) FROM T1");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertEquals(3, rs.getInt(2));
+        assertEquals(15, rs.getInt(3));
+        assertEquals(20, rs.getInt(4));
+        assertFalse(rs.next());
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d05d7c86/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 92dd1d4..8a2f127 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.expression.function.FirstValueFunction;
 import org.apache.phoenix.expression.function.FloorDateExpression;
 import org.apache.phoenix.expression.function.FloorDecimalExpression;
 import org.apache.phoenix.expression.function.FloorFunction;
+import org.apache.phoenix.expression.function.HourFunction;
 import org.apache.phoenix.expression.function.IndexStateNameFunction;
 import org.apache.phoenix.expression.function.InvertFunction;
 import org.apache.phoenix.expression.function.LTrimFunction;
@@ -84,6 +85,7 @@ import org.apache.phoenix.expression.function.ToTimestampFunction;
 import org.apache.phoenix.expression.function.TrimFunction;
 import org.apache.phoenix.expression.function.TruncFunction;
 import org.apache.phoenix.expression.function.UpperFunction;
+import org.apache.phoenix.expression.function.WeekFunction;
 import org.apache.phoenix.expression.function.YearFunction;
 
 import com.google.common.collect.Maps;
@@ -199,7 +201,9 @@ public enum ExpressionType {
     SignFunction(SignFunction.class),
     YearFunction(YearFunction.class),
     MonthFunction(MonthFunction.class),
-    SecondFunction(SecondFunction.class)
+    SecondFunction(SecondFunction.class),
+    WeekFunction(WeekFunction.class),
+    HourFunction(HourFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d05d7c86/phoenix-core/src/main/java/org/apache/phoenix/expression/function/HourFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/HourFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/HourFunction.java
new file mode 100644
index 0000000..0e9efd8
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/HourFunction.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PTimestamp;
+
+/**
+ * 
+ * Implementation of the HOUR() built-in. Input: Date/Timestamp/Time.
+ * Returns an integer from 0 to 23 representing the hour component of the time.
+ * 
+ */
+@BuiltInFunction(name=HourFunction.NAME, 
+args={@Argument(allowedTypes={PTimestamp.class})})
+public class HourFunction extends ScalarFunction {
+    public static final String NAME = "HOUR";
+
+    public HourFunction() {
+    }
+
+    public HourFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression expression = getChildExpression();
+        if (!expression.evaluate(tuple, ptr)) {
+            return false;
+        }
+        if (ptr.getLength() == 0) {
+            return true; // means null
+        }
+        long dateTime = expression.getDataType().getCodec().decodeLong(ptr, expression.getSortOrder());
+        int hour = (int)(((dateTime/1000) % (24*3600))/3600);
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeInt(hour, byteValue, 0);
+        ptr.set(byteValue);
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private Expression getChildExpression() {
+        return children.get(0);
+    }
+}

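The hour arithmetic in evaluate() reduces epoch milliseconds to seconds, takes the remainder within a day, and divides by 3600, yielding the hour of day in UTC. A quick check of the expression against the value the new IT expects (assuming Phoenix's default GMT parsing of the TO_TIMESTAMP literal):

    import java.util.{Calendar, TimeZone}

    // Build 2006-04-12 15:10:20 UTC, the timestamp upserted by the IT above.
    val cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
    cal.clear()
    cal.set(2006, Calendar.APRIL, 12, 15, 10, 20)
    val dateTime = cal.getTimeInMillis

    // Same expression as HourFunction.evaluate.
    val hour = ((dateTime / 1000) % (24 * 3600)) / 3600
    assert(hour == 15)
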
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d05d7c86/phoenix-core/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java
new file mode 100644
index 0000000..126aba8
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.joda.time.DateTime;
+
+/**
+ * 
+ * Implementation of the WEEK() built-in. Input: Date/Timestamp.
+ * Returns an integer from 1 to 53 representing the week of the year of the date.
+ * 
+ */
+@BuiltInFunction(name=WeekFunction.NAME, 
+args={@Argument(allowedTypes={PTimestamp.class})})
+public class WeekFunction extends ScalarFunction {
+    public static final String NAME = "WEEK";
+
+    public WeekFunction() {
+    }
+
+    public WeekFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression expression = getChildExpression();
+        if (!expression.evaluate(tuple, ptr)) {
+            return false;
+        }
+        if (ptr.getLength() == 0) {
+            return true; // means null
+        }
+        long dateTime = expression.getDataType().getCodec().decodeLong(ptr, expression.getSortOrder());
+        DateTime dt = new DateTime(dateTime);
+        int week = dt.getWeekOfWeekyear();
+        PDataType returnType = getDataType();
+        byte[] byteValue = new byte[returnType.getByteSize()];
+        returnType.getCodec().encodeInt(week, byteValue, 0);
+        ptr.set(byteValue);
+        return true;
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PInteger.INSTANCE;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private Expression getChildExpression() {
+        return children.get(0);
+    }
+}

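Unlike HOUR(), WEEK() delegates to Joda Time and so inherits ISO-8601 week numbering: week 1 is the week containing the year's first Thursday, and weeks run Monday through Sunday. For example:

    import org.joda.time.{DateTime, DateTimeZone}

    val week = new DateTime(2006, 4, 12, 0, 0, 0, 0, DateTimeZone.UTC).getWeekOfWeekyear
    assert(week == 15) // 2006-04-12 falls in ISO week 15, as the new IT expects

Note that evaluate() constructs the DateTime with the JVM's default time zone, so timestamps near a week boundary can map to different weeks on differently configured servers.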

[25/50] [abbrv] phoenix git commit: PHOENIX-1580 Support UNION ALL

Posted by ma...@apache.org.
PHOENIX-1580 Support UNION ALL


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5ea3607c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5ea3607c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5ea3607c

Branch: refs/heads/calcite
Commit: 5ea3607c7e0603e1e26b7e14ed166ea67818c038
Parents: c50feca
Author: James Taylor <ja...@apache.org>
Authored: Mon Apr 6 11:31:58 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Mon Apr 6 11:31:58 2015 -0700

----------------------------------------------------------------------
 .../phoenix/iterate/MergeSortTopNResultIterator.java     | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5ea3607c/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
index 87a6a62..4c4097f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java
@@ -31,28 +31,19 @@ import org.apache.phoenix.schema.tuple.Tuple;
  * returning the rows ordered by the OrderByExpression. The input
  * iterators must be ordered by the OrderByExpression.
  *
- * 
- * @since 0.1
  */
 public class MergeSortTopNResultIterator extends MergeSortResultIterator {
 
     private final int limit;
-    private final boolean clientSideOnly;
     private int count = 0;
     private final List<OrderByExpression> orderByColumns;
     private final ImmutableBytesWritable ptr1 = new ImmutableBytesWritable();
     private final ImmutableBytesWritable ptr2 = new ImmutableBytesWritable();
     
-    public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit,
-            List<OrderByExpression> orderByColumns, boolean clientSideOnly) {
+    public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit, List<OrderByExpression> orderByColumns) {
         super(iterators);
         this.limit = limit == null ? -1 : limit;
         this.orderByColumns = orderByColumns;
-        this.clientSideOnly = clientSideOnly;
-    }
-
-    public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit, List<OrderByExpression> orderByColumns) {
-        this(iterators, limit, orderByColumns, false);
     }
 
     @Override

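The hunk above is incidental cleanup in the merge-sort iterator; the headline change is UNION ALL support in the query layer. A hypothetical use through the JDBC driver, reusing table names from the test schema earlier in this thread (the connection string is a placeholder):

    import java.sql.DriverManager

    val conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181:/hbase")
    val rs = conn.createStatement().executeQuery(
      "SELECT id FROM table1 UNION ALL SELECT \"id\" FROM \"table3\"")
    while (rs.next()) println(rs.getLong(1))
    conn.close()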

[17/50] [abbrv] phoenix git commit: PHOENIX-1798 UnsupportedOperationException thrown from BaseResultIterators.getIterators (Cen Qi)

Posted by ma...@apache.org.
PHOENIX-1798 UnsupportedOperationException thrown from BaseResultIterators.getIterators (Cen Qi)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4859fb9d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4859fb9d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4859fb9d

Branch: refs/heads/calcite
Commit: 4859fb9d66ccdeb4224bf7d52895368d9592d5fa
Parents: 1f942b1
Author: Samarth <sa...@salesforce.com>
Authored: Thu Apr 2 16:26:50 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Apr 2 16:26:50 2015 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/iterate/BaseResultIterators.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4859fb9d/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index a120143..9ac6a29 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -556,7 +556,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
                             // Add any concatIterators that were successful so far
                             // as we need these to be in order
                             addIterator(iterators, concatIterators);
-                            concatIterators = Collections.emptyList();
+                            concatIterators = Lists.newArrayList();
                             submitWork(newNestedScans, newFutures, allIterators, newNestedScans.size());
                             allFutures.add(newFutures);
                             for (List<Pair<Scan,Future<PeekingResultIterator>>> newFuture : reverseIfNecessary(newFutures, isReverse)) {

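The one-line fix swaps an immutable list for a mutable one. The failure mode, reduced to its essence:

    import java.util.{ArrayList, Collections}

    val broken = Collections.emptyList[String]()
    // broken.add("x")      // throws UnsupportedOperationException: emptyList()
    //                      // returns an immutable singleton, the bug reported here
    val fixed = new ArrayList[String]()
    fixed.add("x")          // fine; Guava's Lists.newArrayList() behaves the same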

[18/50] [abbrv] phoenix git commit: PHOENIX-1799 Provide parameter metadata for prepared create table statements

Posted by ma...@apache.org.
PHOENIX-1799 Provide parameter metadata for prepared create table statements


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb732711
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb732711
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb732711

Branch: refs/heads/calcite
Commit: eb7327113c1310af209015cf84caec339930ae3e
Parents: 4859fb9
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 2 13:32:47 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 2 16:34:41 2015 -0700

----------------------------------------------------------------------
 .../phoenix/compile/CreateTableCompiler.java    | 37 ++++++++++++++++++++
 .../phoenix/compile/QueryMetaDataTest.java      | 11 ++++++
 2 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb732711/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
index edee788..a5adc49 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
@@ -41,6 +41,7 @@ import org.apache.phoenix.expression.RowKeyColumnExpression;
 import org.apache.phoenix.expression.visitor.StatelessTraverseNoExpressionVisitor;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.BindParseNode;
 import org.apache.phoenix.parse.ColumnParseNode;
 import org.apache.phoenix.parse.CreateTableStatement;
 import org.apache.phoenix.parse.ParseNode;
@@ -50,11 +51,15 @@ import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.query.DelegateConnectionQueryServices;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.QueryUtil;
 
@@ -62,6 +67,7 @@ import com.google.common.collect.Iterators;
 
 
 public class CreateTableCompiler {
+    private static final PDatum VARBINARY_DATUM = new VarbinaryDatum();
     private final PhoenixStatement statement;
     
     public CreateTableCompiler(PhoenixStatement statement) {
@@ -151,6 +157,9 @@ public class CreateTableCompiler {
         ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
         for (int i = 0; i < splits.length; i++) {
             ParseNode node = splitNodes.get(i);
+            if (node instanceof BindParseNode) {
+                context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM);
+            }
             if (node.isStateless()) {
                 Expression expression = node.accept(expressionCompiler);
                 if (expression.evaluate(null, ptr)) {;
@@ -302,4 +311,32 @@ public class CreateTableCompiler {
         }
         
     }
+    private static class VarbinaryDatum implements PDatum {
+
+        @Override
+        public boolean isNullable() {
+            return false;
+        }
+
+        @Override
+        public PDataType getDataType() {
+            return PVarbinary.INSTANCE;
+        }
+
+        @Override
+        public Integer getMaxLength() {
+            return null;
+        }
+
+        @Override
+        public Integer getScale() {
+            return null;
+        }
+
+        @Override
+        public SortOrder getSortOrder() {
+            return SortOrder.getDefault();
+        }
+        
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb732711/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java
index 73ba2a4..bf11b72 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java
@@ -441,4 +441,15 @@ public class QueryMetaDataTest extends BaseConnectionlessQueryTest {
         assertEquals(String.class.getName(), pmd.getParameterClassName(2));
         assertEquals(String.class.getName(), pmd.getParameterClassName(3));
     }
+    
+    @Test
+    public void testBindParamMetaDataForCreateTable() throws Exception {
+        String ddl = "CREATE TABLE foo (k VARCHAR PRIMARY KEY) SPLIT ON (?, ?)";
+        Connection conn = DriverManager.getConnection(PHOENIX_JDBC_URL, PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
+        PreparedStatement statement = conn.prepareStatement(ddl);
+        ParameterMetaData pmd = statement.getParameterMetaData();
+        assertEquals(2, pmd.getParameterCount());
+        assertEquals(byte[].class.getName(), pmd.getParameterClassName(1));
+        assertEquals(byte[].class.getName(), pmd.getParameterClassName(2));
+    }
 }

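With this change, bind parameters in a SPLIT ON clause are typed as VARBINARY, so ParameterMetaData reports byte[] for each split point. In sketch form, assuming an open Phoenix connection `conn`:

    val stmt = conn.prepareStatement(
      "CREATE TABLE foo (k VARCHAR PRIMARY KEY) SPLIT ON (?, ?)")
    val pmd = stmt.getParameterMetaData
    assert(pmd.getParameterCount == 2)
    assert(pmd.getParameterClassName(1) == classOf[Array[Byte]].getName) // "[B"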

[30/50] [abbrv] phoenix git commit: PHOENIX-1071 - Lowering memory usage for Spark integration tests to see if build passes

Posted by ma...@apache.org.
PHOENIX-1071 - Lowering memory usage for Spark integration tests to see if build passes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5c32d195
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5c32d195
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5c32d195

Branch: refs/heads/calcite
Commit: 5c32d1955f7289d38813424163f3b472ae0fe304
Parents: ff0e8e4
Author: Mujtaba <mu...@apache.org>
Authored: Tue Apr 7 16:58:12 2015 -0700
Committer: Mujtaba <mu...@apache.org>
Committed: Tue Apr 7 16:58:12 2015 -0700

----------------------------------------------------------------------
 phoenix-spark/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5c32d195/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index fd0ccaf..412f59a 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -502,7 +502,7 @@
             <configuration>
               <parallel>true</parallel>
               <tagsToExclude>Integration-Test</tagsToExclude>
-              <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
+              <argLine>-Xmx2g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
             </configuration>
           </execution>
           <execution>


[03/50] [abbrv] phoenix git commit: PHOENIX-1770 Correct exit code from bin scripts

Posted by ma...@apache.org.
PHOENIX-1770 Correct exit code from bin scripts

Make the Python scripts under bin/ exit with the exit code
returned by the underlying java command.

Contributed by Mark Tse.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1a137842
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1a137842
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1a137842

Branch: refs/heads/calcite
Commit: 1a13784228f05ebe51e52ad80ae464d8531c2fe0
Parents: f941e89
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Thu Mar 26 08:43:48 2015 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu Mar 26 08:45:49 2015 +0100

----------------------------------------------------------------------
 bin/end2endTest.py |  3 ++-
 bin/performance.py | 13 ++++++++++---
 bin/psql.py        |  3 ++-
 3 files changed, 14 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1a137842/bin/end2endTest.py
----------------------------------------------------------------------
diff --git a/bin/end2endTest.py b/bin/end2endTest.py
index 96886c7..a5993dc 100755
--- a/bin/end2endTest.py
+++ b/bin/end2endTest.py
@@ -44,4 +44,5 @@ java_cmd = "java -cp " + hbase_config_path + os.pathsep + phoenix_jar_path + os.
     hbase_library_path + " org.apache.phoenix.end2end.End2EndTestDriver " + \
     ' '.join(sys.argv[1:])
 
-subprocess.call(java_cmd, shell=True)
+exitcode = subprocess.call(java_cmd, shell=True)
+sys.exit(exitcode)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1a137842/bin/performance.py
----------------------------------------------------------------------
diff --git a/bin/performance.py b/bin/performance.py
index c69edfd..b9df433 100755
--- a/bin/performance.py
+++ b/bin/performance.py
@@ -85,7 +85,9 @@ print "-----------------------------------------"
 print "\nCreating performance table..."
 createFileWithContent(ddl, createtable)
 
-subprocess.call(execute + ddl, shell=True)
+exitcode = subprocess.call(execute + ddl, shell=True)
+if exitcode != 0:
+    sys.exit(exitcode)
 
 # Write real,user,sys time on console for the following queries
 queryex("1 - Count", "SELECT COUNT(1) FROM %s;" % (table))
@@ -95,11 +97,16 @@ queryex("4 - Truncate + Group By", "SELECT TRUNC(DATE,'DAY') DAY FROM %s GROUP B
 queryex("5 - Filter + Count", "SELECT COUNT(1) FROM %s WHERE CORE<10;" % (table))
 
 print "\nGenerating and upserting data..."
-subprocess.call('java -jar %s %s' % (phoenix_utils.testjar, rowcount), shell=True)
+exitcode = subprocess.call('java -jar %s %s' % (phoenix_utils.testjar, rowcount), shell=True)
+if exitcode != 0:
+    sys.exit(exitcode)
+
 print "\n"
 createFileWithContent(qry, statements)
 
-subprocess.call(execute + data + ' ' + qry, shell=True)
+exitcode = subprocess.call(execute + data + ' ' + qry, shell=True)
+if exitcode != 0:
+    sys.exit(exitcode)
 
 # clear temporary files
 delfile(ddl)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1a137842/bin/psql.py
----------------------------------------------------------------------
diff --git a/bin/psql.py b/bin/psql.py
index 34a95df..247001a 100755
--- a/bin/psql.py
+++ b/bin/psql.py
@@ -39,4 +39,5 @@ java_cmd = 'java -cp "' + phoenix_utils.hbase_conf_path + os.pathsep + phoenix_u
     os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
     " org.apache.phoenix.util.PhoenixRuntime " + args 
 
-subprocess.call(java_cmd, shell=True)
+exitcode = subprocess.call(java_cmd, shell=True)
+sys.exit(exitcode)
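
For reference, the pattern applied in all three scripts, distilled into a
minimal standalone sketch. The command string is a placeholder assumption;
the real scripts assemble a full java classpath invocation.

#!/usr/bin/env python
# Capture the exit code of the underlying java process and propagate it
# to the calling shell instead of always exiting 0.
import subprocess
import sys

java_cmd = "java -version"  # placeholder for the real command line
exitcode = subprocess.call(java_cmd, shell=True)
if exitcode != 0:
    sys.exit(exitcode)  # stop early on failure, as performance.py now does

# ... any further steps would run here ...
sys.exit(exitcode)  # propagate the final status, as end2endTest.py and psql.py do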


[45/50] [abbrv] phoenix git commit: PHOENIX-1867 Add joni library to server assembly files (Shuxiong Ye)

Posted by ma...@apache.org.
PHOENIX-1867 Add joni library to server assembly files (Shuxiong Ye)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/29522506
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/29522506
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/29522506

Branch: refs/heads/calcite
Commit: 295225060535d3030789a13817d55040b3db99de
Parents: b0c28a2
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Apr 15 00:38:45 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Apr 15 00:38:45 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/src/build/server-without-antlr.xml | 2 ++
 phoenix-assembly/src/build/server.xml               | 2 ++
 2 files changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/29522506/phoenix-assembly/src/build/server-without-antlr.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/server-without-antlr.xml b/phoenix-assembly/src/build/server-without-antlr.xml
index 1750d1c..072ade0 100644
--- a/phoenix-assembly/src/build/server-without-antlr.xml
+++ b/phoenix-assembly/src/build/server-without-antlr.xml
@@ -36,6 +36,8 @@
       <includes>
         <include>org.apache.phoenix:phoenix-core</include>
         <include>org.iq80.snappy:snappy</include>
+        <include>org.jruby.joni:joni</include>
+        <include>org.jruby.jcodings:jcodings</include>
       </includes>
     </dependencySet>
   </dependencySets>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29522506/phoenix-assembly/src/build/server.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/server.xml b/phoenix-assembly/src/build/server.xml
index 5ec2106..12d3d81 100644
--- a/phoenix-assembly/src/build/server.xml
+++ b/phoenix-assembly/src/build/server.xml
@@ -36,6 +36,8 @@
       <includes>
         <include>org.apache.phoenix:phoenix-core</include>
         <include>org.iq80.snappy:snappy</include>
+        <include>org.jruby.joni:joni</include>
+        <include>org.jruby.jcodings:jcodings</include>
       </includes>
     </dependencySet>
     <dependencySet>