Posted to commits@phoenix.apache.org by ma...@apache.org on 2015/06/15 20:45:41 UTC

[01/50] [abbrv] phoenix git commit: PHOENIX-1930 [BW COMPAT] Queries hang with client on Phoenix 4.3.0 and server on 4.x-HBase-0.98

Repository: phoenix
Updated Branches:
  refs/heads/calcite f9ddb988c -> 62d6720f7


PHOENIX-1930 [BW COMPAT] Queries hang with client on Phoenix 4.3.0 and server on 4.x-HBase-0.98


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/864faba6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/864faba6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/864faba6

Branch: refs/heads/calcite
Commit: 864faba6d6091136d6776f1d81cd5264d3a0e14e
Parents: 064b7af
Author: Thomas <td...@salesforce.com>
Authored: Wed Apr 29 11:16:41 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Wed Apr 29 11:42:09 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/expression/ExpressionType.java   | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/864faba6/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index d5cf745..843a768 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -143,7 +143,7 @@ public enum ExpressionType {
     SumAggregateFunction(SumAggregateFunction.class),
     MinAggregateFunction(MinAggregateFunction.class),
     MaxAggregateFunction(MaxAggregateFunction.class),
-    LikeExpression(LikeExpression.class),
+    StringBasedLikeExpression(StringBasedLikeExpression.class),
     NotExpression(NotExpression.class),
     CaseExpression(CaseExpression.class),
     InListExpression(InListExpression.class),
@@ -159,9 +159,9 @@ public enum ExpressionType {
     LongDivideExpression(LongDivideExpression.class),
     DecimalDivideExpression(DecimalDivideExpression.class),
     CoalesceFunction(CoalesceFunction.class),
-    RegexpReplaceFunction(RegexpReplaceFunction.class),
+    StringBasedRegexpReplaceFunction(StringBasedRegexpReplaceFunction.class),
     SQLTypeNameFunction(SqlTypeNameFunction.class),
-    RegexpSubstrFunction(RegexpSubstrFunction.class),
+    StringBasedRegexpSubstrFunction(StringBasedRegexpSubstrFunction.class),
     StringConcatExpression(StringConcatExpression.class),
     LengthFunction(LengthFunction.class),
     LTrimFunction(LTrimFunction.class),
@@ -206,7 +206,7 @@ public enum ExpressionType {
     SQLIndexTypeFunction(SQLIndexTypeFunction.class),
     ModulusExpression(ModulusExpression.class),
     DistinctValueAggregateFunction(DistinctValueAggregateFunction.class),
-    RegexpSplitFunction(RegexpSplitFunction.class),
+    StringBasedRegexpSplitFunction(StringBasedRegexpSplitFunction.class),
     RandomFunction(RandomFunction.class),
     ToTimeFunction(ToTimeFunction.class),
     ToTimestampFunction(ToTimestampFunction.class),
@@ -214,10 +214,6 @@ public enum ExpressionType {
     ByteBasedRegexpReplaceFunction(ByteBasedRegexpReplaceFunction.class),
     ByteBasedRegexpSubstrFunction(ByteBasedRegexpSubstrFunction.class),
     ByteBasedRegexpSplitFunction(ByteBasedRegexpSplitFunction.class),
-    StringBasedLikeExpression(StringBasedLikeExpression.class),
-    StringBasedRegexpReplaceFunction(StringBasedRegexpReplaceFunction.class),
-    StringBasedRegexpSubstrFunction(StringBasedRegexpSubstrFunction.class),
-    StringBasedRegexpSplitFunction(StringBasedRegexpSplitFunction.class),
     SignFunction(SignFunction.class),
     YearFunction(YearFunction.class),
     MonthFunction(MonthFunction.class),
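
Why this fix works: Phoenix serializes each expression over the wire as its ExpressionType ordinal, so the enum's entry order is effectively part of the client/server protocol. The patch re-points the ordinals historically held by LikeExpression and the Regexp* functions at the string-based implementations (the behavior a 4.3 client expects) and drops the duplicate entries that had been appended at the tail. A minimal sketch of ordinal-based serialization, with simplified hypothetical names rather than the actual Phoenix I/O code:

import java.io.*;

// The wire format carries only the enum ordinal, so an old client and a new
// server must agree on entry order forever: new types may only be appended,
// and an existing slot may only be re-pointed at a behavior-compatible class,
// which is exactly what the patch above does.
enum ExprType {
    SUM_AGGREGATE,   // ordinal 0
    LIKE,            // ordinal 1 -- a 4.3 client encodes LIKE as the byte 1
    NOT;             // ordinal 2

    void write(DataOutput out) throws IOException {
        out.writeByte(ordinal());               // only the position is sent
    }

    static ExprType read(DataInput in) throws IOException {
        return values()[in.readUnsignedByte()]; // lookup is purely positional
    }
}

Later commits in this digest obey the same rule: PHOENIX-1875 below appends ArrayPrependFunction after UDFExpression instead of inserting it next to the other array functions.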


[42/50] [abbrv] phoenix git commit: PHOENIX-1987 SIGN built-in function should be order preserving (Shuxiong Ye)

Posted by ma...@apache.org.
PHOENIX-1987 SIGN built-in function should be order preserving (Shuxiong Ye)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/47466e31
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/47466e31
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/47466e31

Branch: refs/heads/calcite
Commit: 47466e317db72d651c120b1c04bf687abfe10e34
Parents: 6c3d50a
Author: James Taylor <ja...@apache.org>
Authored: Thu Jun 4 14:24:06 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Thu Jun 4 14:24:06 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/expression/function/SignFunction.java    | 5 +++++
 .../java/org/apache/phoenix/expression/SignFunctionTest.java    | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/47466e31/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SignFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SignFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SignFunction.java
index 0b470f8..a11eaff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SignFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SignFunction.java
@@ -71,4 +71,9 @@ public class SignFunction extends ScalarFunction {
     public String getName() {
         return NAME;
     }
+
+    @Override
+    public OrderPreserving preservesOrder() {
+        return OrderPreserving.YES;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/47466e31/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java
index 37d6e1d..e4a5f80 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java
@@ -54,7 +54,8 @@ public class SignFunctionTest {
         Expression signFunction = new SignFunction(expressions);
         ImmutableBytesWritable ptr = new ImmutableBytesWritable();
         signFunction.evaluate(null, ptr);
-        Integer result = (Integer) signFunction.getDataType().toObject(ptr);
+        Integer result =
+                (Integer) signFunction.getDataType().toObject(ptr, signFunction.getSortOrder());
         assertTrue(result.compareTo(expected) == 0);
     }
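
What the override buys: SIGN is monotonically non-decreasing (x1 <= x2 implies SIGN(x1) <= SIGN(x2)), so declaring OrderPreserving.YES lets the optimizer satisfy an ORDER BY or GROUP BY over SIGN(key_column) from the existing row-key order instead of re-sorting. The test change is the companion fix: under a DESC sort order the stored bytes are inverted, so toObject must be handed the expression's sort order to decode the value. A quick self-contained check of the monotonicity claim, using plain java.lang.Math rather than Phoenix internals:

import java.util.Arrays;

// Run with "java -ea": sorting the inputs and mapping them through signum
// must yield a non-decreasing output sequence if sign() preserves order.
public class SignMonotonicity {
    public static void main(String[] args) {
        double[] xs = { -5.2, -0.1, 0.0, 3.7, 42.0 };
        Arrays.sort(xs);
        for (int i = 1; i < xs.length; i++) {
            assert Math.signum(xs[i - 1]) <= Math.signum(xs[i])
                : "signum is not order preserving";
        }
        System.out.println("signum preserved input order");
    }
}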
 


[20/50] [abbrv] phoenix git commit: PHOENIX-1990 bin/queryserver makeWinServiceDesc doesn't actually work in Windows

Posted by ma...@apache.org.
PHOENIX-1990 bin/queryserver makeWinServiceDesc doesn't actually work in Windows


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c83ab9ed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c83ab9ed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c83ab9ed

Branch: refs/heads/calcite
Commit: c83ab9edba7b417a001fb702de5d893cbda95f29
Parents: 6fc53b5
Author: Nick Dimiduk <nd...@apache.org>
Authored: Mon May 18 16:00:31 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Mon May 18 16:00:31 2015 -0700

----------------------------------------------------------------------
 bin/queryserver.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c83ab9ed/bin/queryserver.py
----------------------------------------------------------------------
diff --git a/bin/queryserver.py b/bin/queryserver.py
index 6a18741..7666246 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -78,11 +78,22 @@ phoenix_out_file = '%s.out' % phoenix_file_basename
 phoenix_pid_file = '%s.pid' % phoenix_file_basename
 opts = os.getenv('PHOENIX_QUERYSERVER_OPTS', '')
 
-# load hbase-env.sh to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
-hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
+hbase_env_path = None
+hbase_env_cmd  = None
+if os.name == 'posix':
+    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
+    hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
+elif os.name == 'nt':
+    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
+    hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
+if not hbase_env_path or not hbase_env_cmd:
+    print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
+    sys.exit(-1)
+
 hbase_env = {}
 if os.path.isfile(hbase_env_path):
-    p = subprocess.Popen(['bash', '-c', 'source %s && env' % hbase_env_path], stdout = subprocess.PIPE)
+    p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
     for x in p.stdout:
         (k, _, v) = x.partition('=')
         hbase_env[k.strip()] = v.strip()
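
The technique worth naming here: instead of parsing the env script itself, the launcher asks the platform's own shell to evaluate it ("source ... && env" under bash, "call ... & set" under cmd.exe) and then reads the child's KEY=VALUE output. A rough Java analogue of the POSIX branch, for comparison; the helper is hypothetical and not part of Phoenix:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;

public class EnvCapture {
    // Source a shell script in a child bash, dump the resulting environment,
    // and split each line on the first '=' (mirroring Python's partition()).
    public static Map<String, String> sourceAndCapture(String script) throws Exception {
        Process p = new ProcessBuilder("bash", "-c", "source " + script + " && env").start();
        Map<String, String> env = new HashMap<>();
        try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
            String line;
            while ((line = r.readLine()) != null) {
                int eq = line.indexOf('=');
                if (eq > 0) {
                    env.put(line.substring(0, eq), line.substring(eq + 1));
                }
            }
        }
        p.waitFor();
        return env;
    }
}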


[21/50] [abbrv] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

Posted by ma...@apache.org.
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a4b4e0e2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a4b4e0e2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a4b4e0e2

Branch: refs/heads/calcite
Commit: a4b4e0e2d862d5d4ee0f3a6f9587f53fe87d629f
Parents: c83ab9e
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed May 20 09:53:53 2015 -0700

----------------------------------------------------------------------
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --------------
 .../covered/filter/TestFamilyOnlyFilter.java    | 106 -------------------
 2 files changed, 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4b4e0e2/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-    this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-    super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-    return done;
-  }
-
-  @Override
-  public void reset() {
-    done = false;
-    previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-    if (done) {
-      return ReturnCode.SKIP;
-    }
-    ReturnCode code = super.filterKeyValue(v);
-    if (previousMatchFound) {
-      // we found a match before, and now we are skipping the key because of the family, therefore
-      // we are done (no more of the family).
-      if (code.equals(ReturnCode.SKIP)) {
-      done = true;
-      }
-    } else {
-      // if we haven't seen a match before, then it doesn't matter what we see now, except to mark
-      // if we've seen a match
-      if (code.equals(ReturnCode.INCLUDE)) {
-        previousMatchFound = true;
-      }
-    }
-    return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4b4e0e2/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.covered.filter.FamilyOnlyFilter;
-import org.junit.Test;
-
-/**
- * Test that the family only filter only allows a single family through
- */
-public class TestFamilyOnlyFilter {
-
-  byte[] row = new byte[] { 'a' };
-  byte[] qual = new byte[] { 'b' };
-  byte[] val = Bytes.toBytes("val");
-
-  @Test
-  public void testPassesFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family!", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testPassesTargetFamilyAsNonFirstFamily() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    kv = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-  }
-
-  @Test
-  public void testResetFilter() {
-    byte[] fam = Bytes.toBytes("fam");
-    byte[] fam2 = Bytes.toBytes("fam2");
-    byte[] fam3 = Bytes.toBytes("way_after_family");
-
-    FamilyOnlyFilter filter = new FamilyOnlyFilter(fam2);
-
-    KeyValue kv = new KeyValue(row, fam, qual, 10, val);
-
-    ReturnCode code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    KeyValue accept = new KeyValue(row, fam2, qual, 10, val);
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family", ReturnCode.INCLUDE, code);
-
-    kv = new KeyValue(row, fam3, qual, 10, val);
-    code = filter.filterKeyValue(kv);
-    assertEquals("Didn't filter out non-matching family!", ReturnCode.SKIP, code);
-
-    // we shouldn't match the family again - everything after a switched family should be ignored
-    code = filter.filterKeyValue(accept);
-    assertEquals("Should have skipped a 'matching' family if it arrives out of order",
-      ReturnCode.SKIP, code);
-
-    // reset the filter and we should accept it again
-    filter.reset();
-    code = filter.filterKeyValue(accept);
-    assertEquals("Didn't pass matching family after reset", ReturnCode.INCLUDE, code);
-  }
-}


[33/50] [abbrv] phoenix git commit: PHOENIX-2013 Apply PHOENIX-1995 to runnable uberjar as well

Posted by ma...@apache.org.
PHOENIX-2013 Apply PHOENIX-1995 to runnable uberjar as well


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/160e9497
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/160e9497
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/160e9497

Branch: refs/heads/calcite
Commit: 160e9497dcef541af0e0a9aacf93eed9acb7f8ca
Parents: 170e8cc
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed May 27 11:27:04 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed May 27 11:27:04 2015 -0700

----------------------------------------------------------------------
 phoenix-server/src/build/query-server-runnable.xml | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/160e9497/phoenix-server/src/build/query-server-runnable.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/src/build/query-server-runnable.xml b/phoenix-server/src/build/query-server-runnable.xml
index e2a3dc4..ef22b14 100644
--- a/phoenix-server/src/build/query-server-runnable.xml
+++ b/phoenix-server/src/build/query-server-runnable.xml
@@ -28,6 +28,15 @@
     <format>jar</format>
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
+  <containerDescriptorHandlers>
+    <containerDescriptorHandler>
+      <!--
+          aggregate SPI's so that things like HDFS FileSystem works in uberjar
+          http://docs.oracle.com/javase/tutorial/sound/SPI-intro.html
+      -->
+      <handlerName>metaInf-services</handlerName>
+    </containerDescriptorHandler>
+  </containerDescriptorHandlers>
   <dependencySets>
     <dependencySet>
       <outputDirectory>/</outputDirectory>
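
Why the handler matters: java.util.ServiceLoader discovers implementations by reading META-INF/services/<interface-name> files from the classpath, and several Hadoop dependencies ship files with the same name (org.apache.hadoop.fs.FileSystem being the usual one). A naive uberjar keeps only one copy, so lookups such as the hdfs:// scheme silently lose providers; the metaInf-services handler concatenates the provider lists instead. A minimal illustration of the lookup that breaks without it, using a hypothetical Codec SPI:

import java.util.ServiceLoader;

// Providers are found by reading META-INF/services/SpiDemo$Codec entries on
// the classpath. If the uberjar keeps only one dependency's copy of that
// file, the other implementations never appear in this iteration.
public class SpiDemo {
    public interface Codec { String name(); }

    public static void main(String[] args) {
        for (Codec c : ServiceLoader.load(Codec.class)) {
            System.out.println("found provider: " + c.name());
        }
    }
}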


[08/50] [abbrv] phoenix git commit: PHOENIX-1944 PQS secure login only executed when debug is enabled

Posted by ma...@apache.org.
PHOENIX-1944 PQS secure login only executed when debug is enabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b47dcb66
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b47dcb66
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b47dcb66

Branch: refs/heads/calcite
Commit: b47dcb66055642559b9dd75f5647473329df432f
Parents: 1fa09dc
Author: Nick Dimiduk <nd...@apache.org>
Authored: Fri May 1 14:23:42 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Fri May 1 14:23:42 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/queryserver/server/Main.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b47dcb66/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
----------------------------------------------------------------------
diff --git a/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java b/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
index b099f91..55febc5 100644
--- a/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
+++ b/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
@@ -121,8 +121,8 @@ public final class Main extends Configured implements Tool, Runnable {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Login to " + hostname + " using " + getConf().get(KEYTAB_FILENAME_KEY)
               + " and principal " + getConf().get(KERBEROS_PRINCIPAL_KEY) + ".");
-          SecurityUtil.login(getConf(), KEYTAB_FILENAME_KEY, KERBEROS_PRINCIPAL_KEY, hostname);
         }
+        SecurityUtil.login(getConf(), KEYTAB_FILENAME_KEY, KERBEROS_PRINCIPAL_KEY, hostname);
         LOG.info("Login successful.");
       }
       Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
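
The one-line move fixes a classic guarded-side-effect bug: the Kerberos login call sat inside the if (LOG.isDebugEnabled()) block, so the query server only authenticated when debug logging happened to be on. A log-level guard should wrap nothing but the log statement. Schematically, with hypothetical stand-ins for the logger and the login call:

// A boolean flag stands in for LOG.isDebugEnabled(), and kerberosLogin()
// for SecurityUtil.login(); the shape of the bug and the fix is the same.
public class GuardedSideEffect {
    static boolean debugEnabled = false;

    static void kerberosLogin() { System.out.println("logged in"); }

    static void loginBroken() {
        if (debugEnabled) {
            System.out.println("Logging in...");
            kerberosLogin();     // BUG: skipped unless debug logging is on
        }
    }

    static void loginFixed() {
        if (debugEnabled) {
            System.out.println("Logging in...");
        }
        kerberosLogin();         // always runs, whatever the log level
    }

    public static void main(String[] args) {
        loginBroken();           // prints nothing: the bug in action
        loginFixed();            // prints "logged in"
    }
}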


[38/50] [abbrv] phoenix git commit: PHOENIX-2016 Some Phoenix tests failed with NPE (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-2016 Some Phoenix tests failed with NPE (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dc3083fe
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dc3083fe
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dc3083fe

Branch: refs/heads/calcite
Commit: dc3083fec11720a3b92f3edf98a679406004550f
Parents: 82df3b9
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Mon Jun 1 21:33:47 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Mon Jun 1 21:33:47 2015 +0530

----------------------------------------------------------------------
 phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dc3083fe/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index b0574c3..fa78656 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -1627,6 +1627,7 @@ public abstract class BaseTest {
      * Disable and drop all the tables except SYSTEM.CATALOG and SYSTEM.SEQUENCE
      */
     private static void disableAndDropNonSystemTables() throws Exception {
+        if (driver == null) return;
         HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin();
         try {
             HTableDescriptor[] tables = admin.listTables();


[05/50] [abbrv] phoenix git commit: PHOENIX-1856 Include min row key for each region in stats row - addendum_1 (Ram)

Posted by ma...@apache.org.
PHOENIX-1856 Include min row key for each region in stats row - addendum_1 (Ram)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/70de0cd4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/70de0cd4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/70de0cd4

Branch: refs/heads/calcite
Commit: 70de0cd485705ecc1f8b7864fe3657c4e8408d36
Parents: efd7c9f
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Apr 30 16:25:33 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Apr 30 16:25:33 2015 +0530

----------------------------------------------------------------------
 .../java/org/apache/phoenix/schema/stats/StatisticsCollector.java  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/70de0cd4/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 8e41d4e..272cac6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -284,6 +284,8 @@ public class StatisticsCollector {
     public void getMinKey(ImmutableBytesWritable ptr) {
         if (minKey != null) {
             ptr.set(minKey, minKeyOffset, minKeyLength);
+        } else {
+            ptr.set(HConstants.EMPTY_BYTE_ARRAY);
         }
     }
 }
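
The else branch matters because the ImmutableBytesWritable handed to getMinKey is a reused pointer: when minKey was null, the method previously left ptr untouched, so the caller read whatever bytes the pointer referenced before the call. Setting it to HConstants.EMPTY_BYTE_ARRAY makes "no minimum key" explicit. A sketch of the hazard, simplified from the class above:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

// minKey stands in for StatisticsCollector's field; without the else branch,
// a reused pointer keeps its previous contents when minKey is null.
public class StalePointer {
    static byte[] minKey = null;

    static void getMinKey(ImmutableBytesWritable ptr) {
        if (minKey != null) {
            ptr.set(minKey);
        } else {
            ptr.set(HConstants.EMPTY_BYTE_ARRAY); // the fix: clear stale bytes
        }
    }

    public static void main(String[] args) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(Bytes.toBytes("stale"));
        getMinKey(ptr);
        System.out.println(ptr.getLength()); // 0 with the fix; 5 without it
    }
}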


[14/50] [abbrv] phoenix git commit: PHOENIX-1875 implement ARRAY_PREPEND built-in function (Dumindu)

Posted by ma...@apache.org.
PHOENIX-1875 implement ARRAY_PREPEND built-in function (Dumindu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b5ef25c9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b5ef25c9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b5ef25c9

Branch: refs/heads/calcite
Commit: b5ef25c942fb0f4ab9a6fec66e821c5c3473ea46
Parents: 978b232
Author: ramkrishna <ra...@gmail.com>
Authored: Wed May 13 10:46:19 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Wed May 13 10:46:19 2015 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/ArrayPrependFunctionIT.java | 652 +++++++++++++++++++
 .../phoenix/expression/ExpressionType.java      |   4 +-
 .../function/ArrayAppendFunction.java           |  35 +-
 .../function/ArrayModifierFunction.java         |  75 +++
 .../function/ArrayPrependFunction.java          |  96 +++
 .../phoenix/schema/types/PArrayDataType.java    | 161 ++++-
 .../expression/ArrayPrependFunctionTest.java    | 552 ++++++++++++++++
 7 files changed, 1541 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
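
For readers skimming the 1,500-line diff: ARRAY_PREPEND(elem, arr) returns a new array with elem at position 1, mirroring the existing ARRAY_APPEND, and the refactor hoists their shared type, size, and scale checks into a new ArrayModifierFunction base class. A condensed JDBC usage sketch, distilled from the integration tests below (the connection URL and table are placeholders):

import java.sql.Array;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ArrayPrependDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute(
                "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY, integers INTEGER[])");
            conn.createStatement().execute(
                "UPSERT INTO regions VALUES ('SF Bay Area', ARRAY[2345, 46345, 23234, 456])");
            conn.commit();
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT ARRAY_PREPEND(1234, integers) FROM regions");
            while (rs.next()) {
                Array prepended = rs.getArray(1); // {1234, 2345, 46345, 23234, 456}
                System.out.println(prepended);
            }
        }
    }
}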


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayPrependFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayPrependFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayPrependFunctionIT.java
new file mode 100644
index 0000000..3145d95
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayPrependFunctionIT.java
@@ -0,0 +1,652 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.*;
+
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.junit.Test;
+
+public class ArrayPrependFunctionIT extends BaseHBaseManagedTimeIT {
+
+    private void initTableWithVarArray(Connection conn, String type, Object[] objectArray, String value) throws SQLException {
+        conn.createStatement().execute("CREATE TABLE t ( k VARCHAR PRIMARY KEY, a " + type + "[],b " + type + ")");
+        conn.commit();
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?," + value + ")");
+        PhoenixArray array = (PhoenixArray) conn.createArrayOf(type, objectArray);
+        stmt.setString(1, "a");
+        stmt.setArray(2, array);
+        stmt.execute();
+        conn.commit();
+
+    }
+
+    private void initTables(Connection conn) throws Exception {
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[],integers INTEGER[],doubles DOUBLE[],bigints BIGINT[],chars CHAR(15)[],double1 DOUBLE,char1 CHAR(17),nullcheck INTEGER,chars2 CHAR(15)[])";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO regions(region_name,varchars,integers,doubles,bigints,chars,double1,char1,nullcheck,chars2) VALUES('SF Bay Area'," +
+                "ARRAY['2345','46345','23234']," +
+                "ARRAY[2345,46345,23234,456]," +
+                "ARRAY[23.45,46.345,23.234,45.6,5.78]," +
+                "ARRAY[12,34,56,78,910]," +
+                "ARRAY['a','bbbb','c','ddd','e']," +
+                "23.45," +
+                "'wert'," +
+                "NULL," +
+                "ARRAY['foo','a','bbbb','c','ddd','e']" +
+                ")";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.execute();
+        conn.commit();
+    }
+
+    private void initTablesDesc(Connection conn, String type, String val) throws Exception {
+        String ddl = "CREATE TABLE regions (pk " + type + " PRIMARY KEY DESC,varchars VARCHAR[],integers INTEGER[],doubles DOUBLE[],bigints BIGINT[],chars CHAR(15)[],chars2 CHAR(15)[], bools BOOLEAN[])";
+        conn.createStatement().execute(ddl);
+        String dml = "UPSERT INTO regions(pk,varchars,integers,doubles,bigints,chars,chars2,bools) VALUES(" + val + "," +
+                "ARRAY['2345','46345','23234']," +
+                "ARRAY[2345,46345,23234,456]," +
+                "ARRAY[23.45,46.345,23.234,45.6,5.78]," +
+                "ARRAY[12,34,56,78,910]," +
+                "ARRAY['a','bbbb','c','ddd','e']," +
+                "ARRAY['a','bbbb','c','ddd','e','foo']," +
+                "ARRAY[true,false]" +
+                ")";
+        PreparedStatement stmt = conn.prepareStatement(dml);
+        stmt.execute();
+        conn.commit();
+    }
+
+    @Test
+    public void testArrayPrependFunctionInteger() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(1234,integers) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{1234, 2345, 46345, 23234, 456};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionVarchar() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND('34567',varchars) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"34567", "2345", "46345", "23234"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionNulls1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String[] s = new String[]{null, null, "1", "2"};
+        initTableWithVarArray(conn, "VARCHAR", s, null);
+        String[] s2 = new String[]{null, null, null, "1", "2"};
+        PhoenixArray array2 = (PhoenixArray) conn.createArrayOf("VARCHAR", s2);
+        conn = DriverManager.getConnection(getUrl());
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(b,a) FROM t WHERE k = 'a'");
+        assertTrue(rs.next());
+        assertEquals(array2, rs.getArray(1));
+    }
+
+    @Test
+    public void testArrayPrependFunctionNulls2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String[] s = new String[]{"1", "2"};
+        initTableWithVarArray(conn, "VARCHAR", s, null);
+        String[] s2 = new String[]{null, "1", "2"};
+        PhoenixArray array2 = (PhoenixArray) conn.createArrayOf("VARCHAR", s2);
+        conn = DriverManager.getConnection(getUrl());
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(b,a) FROM t WHERE k = 'a'");
+        assertTrue(rs.next());
+        assertEquals(array2, rs.getArray(1));
+    }
+
+    @Test
+    public void testArrayPrependFunctionNulls3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String[] s = new String[]{"176", null, "212"};
+        initTableWithVarArray(conn, "VARCHAR", s, null);
+        String[] s2 = new String[]{null, "176", null, "212"};
+        PhoenixArray array2 = (PhoenixArray) conn.createArrayOf("VARCHAR", s2);
+        conn = DriverManager.getConnection(getUrl());
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(b,a) FROM t WHERE k = 'a'");
+        assertTrue(rs.next());
+        assertEquals(array2, rs.getArray(1));
+    }
+
+    @Test
+    public void testArrayPrependFunctionNulls4() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String[] s = new String[]{"176", null, "212"};
+        initTableWithVarArray(conn, "VARCHAR", s, "'foo'");
+        String[] s2 = new String[]{"foo", "176", null, "212"};
+        PhoenixArray array2 = (PhoenixArray) conn.createArrayOf("VARCHAR", s2);
+        conn = DriverManager.getConnection(getUrl());
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(b,a) FROM t WHERE k = 'a'");
+        assertTrue(rs.next());
+        assertEquals(array2, rs.getArray(1));
+    }
+
+    @Test
+    public void testArrayPrependFunctionDouble() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(double1,doubles) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{23.45, 23.45, 46.345, 23.234, 45.6, 5.78};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionDouble2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(23,doubles) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{new Double(23), 23.45, 46.345, 23.234, 45.6, 5.78};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionBigint() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(1112,bigints) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Long[] longs = new Long[]{1112l, 12l, 34l, 56l, 78l, 910l};
+
+        Array array = conn.createArrayOf("BIGINT", longs);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionChar() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND('fac',chars) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"fac", "a", "bbbb", "c", "ddd", "e"};
+
+        Array array = conn.createArrayOf("CHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test(expected = TypeMismatchException.class)
+    public void testArrayPrependFunctionIntToCharArray() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(234,varchars) FROM regions WHERE region_name = 'SF Bay Area'");
+    }
+
+    @Test(expected = TypeMismatchException.class)
+    public void testArrayPrependFunctionVarcharToIntegerArray() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND('234',integers) FROM regions WHERE region_name = 'SF Bay Area'");
+
+    }
+
+    @Test(expected = SQLException.class)
+    public void testArrayPrependFunctionChar2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND('facfacfacfacfacfacfac',chars) FROM regions WHERE region_name = 'SF Bay Area'");
+        rs.next();
+        rs.getArray(1);
+    }
+
+    @Test
+    public void testArrayPrependFunctionIntegerToDoubleArray() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(45,doubles) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{45.0, 23.45, 46.345, 23.234, 45.6, 5.78};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithNestedFunctions1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(integers[1],ARRAY[23,45]) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2345, 23, 45};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithNestedFunctions2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(ARRAY_ELEM(ARRAY[2,4],1),integers) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{2, 2345, 46345, 23234, 456};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithNestedFunctions3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(ARRAY_ELEM(doubles,2),doubles) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{46.345, 23.45, 46.345, 23.234, 45.6, 5.78};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithUpsert1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO regions(region_name,varchars) VALUES('SF Bay Area',ARRAY_PREPEND(':-)',ARRAY['hello','world']))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT varchars FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{":-)", "hello", "world"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithUpsert2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,integers INTEGER[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO regions(region_name,integers) VALUES('SF Bay Area',ARRAY_PREPEND(6,ARRAY[4,5]))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT integers FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{6, 4, 5};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithUpsert3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO regions(region_name,doubles) VALUES('SF Bay Area',ARRAY_PREPEND(9.0,ARRAY[5.67,7.87]))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT doubles FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{new Double(9), 5.67, 7.87};
+
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithUpsertSelect1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+        conn.createStatement().execute(ddl);
+
+        ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area',ARRAY_PREPEND(9.0,ARRAY[5.67,7.87]))";
+        conn.createStatement().execute(dml);
+
+        dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area2',ARRAY_PREPEND(9.2,ARRAY[56.7,7.87]))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        dml = "UPSERT INTO target(region_name, doubles) SELECT region_name, ARRAY_PREPEND(5,doubles) FROM source";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT doubles FROM target");
+        assertTrue(rs.next());
+
+        Double[] doubles = new Double[]{new Double(5), new Double(9), 5.67, 7.87};
+        Array array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertTrue(rs.next());
+
+        doubles = new Double[]{new Double(5), new Double(9.2), 56.7, 7.87};
+        array = conn.createArrayOf("DOUBLE", doubles);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionWithUpsertSelect2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+
+        String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+        conn.createStatement().execute(ddl);
+
+        ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+        conn.createStatement().execute(ddl);
+
+        String dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area',ARRAY_PREPEND('c',ARRAY['abcd','b']))";
+        conn.createStatement().execute(dml);
+
+        dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area2',ARRAY_PREPEND('something',ARRAY['d','fgh']))";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        dml = "UPSERT INTO target(region_name, varchars) SELECT region_name, ARRAY_PREPEND('stu',varchars) FROM source";
+        conn.createStatement().execute(dml);
+        conn.commit();
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT varchars FROM target");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"stu", "c", "abcd", "b"};
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertTrue(rs.next());
+
+        strings = new String[]{"stu", "something", "d", "fgh"};
+        array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[123,2345,46345,23234,456]=ARRAY_PREPEND(123,integers)");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE varchars[1]=ANY(ARRAY_PREPEND('1234',ARRAY['2345','46345','23234']))");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere3() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['1234','2345','46345','23234']=ARRAY_PREPEND('1234',ARRAY['2345','46345','23234'])");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere4() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[123.4,23.45,4634.5,2.3234]=ARRAY_PREPEND(123.4,ARRAY[23.45,4634.5,2.3234])");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere5() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['foo','2345','46345','23234']=ARRAY_PREPEND('foo',varchars)");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere6() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE chars2=ARRAY_PREPEND('foo',chars)");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionInWhere7() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[4,2,3]=ARRAY_PREPEND(4,ARRAY[2,3])");
+        assertTrue(rs.next());
+
+        assertEquals("SF Bay Area", rs.getString(1));
+        assertFalse(rs.next());
+    }
+
+    @Test(expected = SQLException.class)
+    public void testArrayPrependFunctionCharLimitCheck() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTables(conn);
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(char1,chars) FROM regions WHERE region_name = 'SF Bay Area'");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"wert", "a", "bbbb", "c", "ddd", "e"};
+
+        Array array = conn.createArrayOf("CHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionIntegerDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "INTEGER", "23");
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(pk,integers) FROM regions");
+        assertTrue(rs.next());
+
+        Integer[] integers = new Integer[]{23, 2345, 46345, 23234, 456};
+
+        Array array = conn.createArrayOf("INTEGER", integers);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+
+    }
+
+    @Test
+    public void testArrayPrependFunctionVarcharDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "VARCHAR", "'e'");
+
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(pk,varchars) FROM regions");
+        assertTrue(rs.next());
+
+        String[] strings = new String[]{"e", "2345", "46345", "23234"};
+
+        Array array = conn.createArrayOf("VARCHAR", strings);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionBigIntDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "BIGINT", "1112");
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(pk,bigints) FROM regions");
+        assertTrue(rs.next());
+
+        Long[] longs = new Long[]{1112l, 12l, 34l, 56l, 78l, 910l};
+
+        Array array = conn.createArrayOf("BIGINT", longs);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testArrayPrependFunctionBooleanDesc() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        initTablesDesc(conn, "BOOLEAN", "false");
+        ResultSet rs;
+        rs = conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(pk,bools) FROM regions");
+        assertTrue(rs.next());
+
+        Boolean[] booleans = new Boolean[]{false, true, false};
+
+        Array array = conn.createArrayOf("BOOLEAN", booleans);
+
+        assertEquals(array, rs.getArray(1));
+        assertFalse(rs.next());
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 71f0521..d7142e7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -25,6 +25,7 @@ import org.apache.phoenix.expression.function.ArrayAppendFunction;
 import org.apache.phoenix.expression.function.ArrayElemRefExpression;
 import org.apache.phoenix.expression.function.ArrayIndexFunction;
 import org.apache.phoenix.expression.function.ArrayLengthFunction;
+import org.apache.phoenix.expression.function.ArrayPrependFunction;
 import org.apache.phoenix.expression.function.ByteBasedRegexpReplaceFunction;
 import org.apache.phoenix.expression.function.ByteBasedRegexpSplitFunction;
 import org.apache.phoenix.expression.function.ByteBasedRegexpSubstrFunction;
@@ -229,7 +230,8 @@ public enum ExpressionType {
     MinuteFunction(MinuteFunction.class),
     DayOfMonthFunction(DayOfMonthFunction.class),
     ArrayAppendFunction(ArrayAppendFunction.class),
-    UDFExpression(UDFExpression.class)
+    UDFExpression(UDFExpression.class),
+    ArrayPrependFunction(ArrayPrependFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
index db92d61..bf6c29f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
@@ -33,7 +33,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
         @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class,
                 PVarbinaryArray.class}),
         @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}, defaultValue = "null")})
-public class ArrayAppendFunction extends ScalarFunction {
+public class ArrayAppendFunction extends ArrayModifierFunction {
 
     public static final String NAME = "ARRAY_APPEND";
 
@@ -42,21 +42,6 @@ public class ArrayAppendFunction extends ScalarFunction {
 
     public ArrayAppendFunction(List<Expression> children) throws TypeMismatchException {
         super(children);
-
-        if (getDataType() != null && !(getElementExpr() instanceof LiteralExpression && getElementExpr().isNullable()) && !getElementDataType().isCoercibleTo(getBaseType())) {
-            throw TypeMismatchException.newException(getBaseType(), getElementDataType());
-        }
-
-        // If the base type of an element is fixed width, make sure the element being appended will fit
-        if (getDataType() != null && getElementExpr().getDataType().getByteSize() == null && getElementDataType() != null && getBaseType().isFixedWidth() && getElementDataType().isFixedWidth() && getArrayExpr().getMaxLength() != null &&
-                getElementExpr().getMaxLength() != null && getElementExpr().getMaxLength() > getArrayExpr().getMaxLength()) {
-            throw new DataExceedsCapacityException("");
-        }
-        // If the base type has a scale, make sure the element being appended has a scale less than or equal to it
-        if (getDataType() != null && getArrayExpr().getScale() != null && getElementExpr().getScale() != null &&
-                getElementExpr().getScale() > getArrayExpr().getScale()) {
-            throw new DataExceedsCapacityException(getBaseType(), getArrayExpr().getMaxLength(), getArrayExpr().getScale());
-        }
     }
 
     @Override
@@ -78,12 +63,8 @@ public class ArrayAppendFunction extends ScalarFunction {
             return true;
         }
 
-        if (!getBaseType().isSizeCompatible(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getArrayExpr().getMaxLength(), getArrayExpr().getScale())) {
-            throw new DataExceedsCapacityException("");
-        }
-
-        getBaseType().coerceBytes(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getElementExpr().getSortOrder(), getArrayExpr().getMaxLength(), getArrayExpr().getScale(), getArrayExpr().getSortOrder());
-
+        checkSizeCompatibility(ptr);
+        coerceBytes(ptr);
         return PArrayDataType.appendItemToArray(ptr, length, offset, arrayBytes, getBaseType(), arrayLength, getMaxLength(), getArrayExpr().getSortOrder());
     }
 
@@ -114,14 +95,4 @@ public class ArrayAppendFunction extends ScalarFunction {
     public Expression getElementExpr() {
         return getChildren().get(1);
     }
-
-    public PDataType getBaseType() {
-        return PDataType.arrayBaseType(getArrayExpr().getDataType());
-    }
-
-    public PDataType getElementDataType() {
-        return getElementExpr().getDataType();
-    }
-
-
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
new file mode 100644
index 0000000..afd10e5
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.exception.DataExceedsCapacityException;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.types.*;
+
+public abstract class ArrayModifierFunction extends ScalarFunction {
+
+    public ArrayModifierFunction() {
+    }
+
+    public ArrayModifierFunction(List<Expression> children) throws TypeMismatchException {
+        super(children);
+
+        if (getDataType() != null && !(getElementExpr() instanceof LiteralExpression && getElementExpr().isNullable()) && !getElementDataType().isCoercibleTo(getBaseType())) {
+            throw TypeMismatchException.newException(getBaseType(), getElementDataType());
+        }
+
+        // If the base type of an element is fixed width, make sure the element being added will fit
+        if (getDataType() != null && getElementExpr().getDataType().getByteSize() == null && getElementDataType() != null && getBaseType().isFixedWidth() && getElementDataType().isFixedWidth() && getArrayExpr().getMaxLength() != null &&
+                getElementExpr().getMaxLength() != null && getElementExpr().getMaxLength() > getArrayExpr().getMaxLength()) {
+            throw new DataExceedsCapacityException("");
+        }
+        // If the base type has a scale, make sure the element being added has a scale less than or equal to it
+        if (getDataType() != null && getArrayExpr().getScale() != null && getElementExpr().getScale() != null &&
+                getElementExpr().getScale() > getArrayExpr().getScale()) {
+            throw new DataExceedsCapacityException(getBaseType(), getArrayExpr().getMaxLength(), getArrayExpr().getScale());
+        }
+    }
+
+    protected void checkSizeCompatibility(ImmutableBytesWritable ptr) {
+        if (!getBaseType().isSizeCompatible(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getArrayExpr().getMaxLength(), getArrayExpr().getScale())) {
+            throw new DataExceedsCapacityException("");
+        }
+    }
+
+    protected void coerceBytes(ImmutableBytesWritable ptr) {
+        getBaseType().coerceBytes(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getElementExpr().getSortOrder(), getArrayExpr().getMaxLength(), getArrayExpr().getScale(), getArrayExpr().getSortOrder());
+    }
+
+    public abstract Expression getArrayExpr();
+
+    public abstract Expression getElementExpr();
+
+    public PDataType getBaseType() {
+        return PDataType.arrayBaseType(getArrayExpr().getDataType());
+    }
+
+    public PDataType getElementDataType() {
+        return getElementExpr().getDataType();
+    }
+}
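
The constructor factored out above enforces three validations shared by array modifiers: the element must be coercible to the array's base type, a fixed-width element must fit within the array's max length, and the element's scale must not exceed the array's. A sketch of the length check firing, built with the same LiteralExpression.newConstant helper the unit tests below use (the CHAR lengths are hypothetical):

    import java.util.List;

    import org.apache.phoenix.expression.Determinism;
    import org.apache.phoenix.expression.Expression;
    import org.apache.phoenix.expression.LiteralExpression;
    import org.apache.phoenix.expression.function.ArrayPrependFunction;
    import org.apache.phoenix.schema.SortOrder;
    import org.apache.phoenix.schema.types.PChar;
    import org.apache.phoenix.schema.types.PCharArray;
    import org.apache.phoenix.schema.types.PhoenixArray;

    import com.google.common.collect.Lists;

    public class CapacityCheckSketch {
        public static void main(String[] args) throws Exception {
            PhoenixArray arr = new PhoenixArray(PChar.INSTANCE, new Object[] { "abc" });
            // Element maxLength 5 exceeds the array's maxLength 3, so constructing
            // the function should throw DataExceedsCapacityException per the
            // fixed-width check in the ArrayModifierFunction constructor above.
            Expression element = LiteralExpression.newConstant(
                "abcde", PChar.INSTANCE, 5, null, SortOrder.ASC, Determinism.ALWAYS);
            Expression array = LiteralExpression.newConstant(
                arr, PCharArray.INSTANCE, 3, null, SortOrder.ASC, Determinism.ALWAYS);
            List<Expression> children = Lists.newArrayList(element, array);
            new ArrayPrependFunction(children);
        }
    }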

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
new file mode 100644
index 0000000..3cea4df
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.*;
+
+@FunctionParseNode.BuiltInFunction(name = ArrayPrependFunction.NAME, args = {
+        @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}),
+        @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class,
+                PVarbinaryArray.class})})
+public class ArrayPrependFunction extends ArrayModifierFunction {
+
+    public static final String NAME = "ARRAY_PREPEND";
+
+    public ArrayPrependFunction() {
+    }
+
+    public ArrayPrependFunction(List<Expression> children) throws TypeMismatchException {
+        super(children);
+    }
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+
+        if (!getArrayExpr().evaluate(tuple, ptr)) {
+            return false;
+        } else if (ptr.getLength() == 0) {
+            return true;
+        }
+        int arrayLength = PArrayDataType.getArrayLength(ptr, getBaseType(), getArrayExpr().getMaxLength());
+
+        int length = ptr.getLength();
+        int offset = ptr.getOffset();
+        byte[] arrayBytes = ptr.get();
+
+        getElementExpr().evaluate(tuple, ptr);
+
+        checkSizeCompatibility(ptr);
+        coerceBytes(ptr);
+        return PArrayDataType.prependItemToArray(ptr, length, offset, arrayBytes, getBaseType(), arrayLength, getMaxLength(), getArrayExpr().getSortOrder());
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return children.get(1).getDataType();
+    }
+
+    @Override
+    public Integer getMaxLength() {
+        return this.children.get(1).getMaxLength();
+    }
+
+    @Override
+    public SortOrder getSortOrder() {
+        return getChildren().get(1).getSortOrder();
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    @Override
+    public Expression getArrayExpr() {
+        return getChildren().get(1);
+    }
+
+    @Override
+    public Expression getElementExpr() {
+        return getChildren().get(0);
+    }
+}
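
Note the argument order relative to ARRAY_APPEND: the element comes first and the array second, so getDataType(), getMaxLength(), and getSortOrder() all delegate to child 1. A minimal JDBC usage sketch (the connection URL, table REGIONS, and its VARCHAR array column VARCHARS are hypothetical):

    import java.sql.Array;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ArrayPrependUsageSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT ARRAY_PREPEND('z', VARCHARS) FROM REGIONS");
            while (rs.next()) {
                // 'z' is now the first element of each returned array
                Array prepended = rs.getArray(1);
                System.out.println(prepended);
            }
            conn.close();
        }
    }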

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index c6861f7..86f22f7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -21,6 +21,7 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.text.Format;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -512,7 +513,7 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
                 } else {
                     int off = newOffsetArrayPosition;
                     for (int arrayIndex = 0; arrayIndex < Math.abs(arrayLength) - 1; arrayIndex++) {
-                        Bytes.putInt(newArray, off, getOffset(arrayBytes, arrayIndex, true, offsetArrayPosition));
+                        Bytes.putInt(newArray, off, getOffset(arrayBytes, arrayIndex, true, offsetArrayPosition + offset));
                         off += Bytes.SIZEOF_INT;
                     }
 
@@ -543,6 +544,164 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
         Bytes.putByte(array, newOffsetArrayPosition + offsetArrayLength + byteSize + 2 * Bytes.SIZEOF_INT, header);
     }
 
+    public static boolean prependItemToArray(ImmutableBytesWritable ptr, int length, int offset, byte[] arrayBytes, PDataType baseType, int arrayLength, Integer maxLength, SortOrder sortOrder) {
+        int elementLength = maxLength == null ? ptr.getLength() : maxLength;
+        if (ptr.getLength() == 0) {
+            elementLength = 0;
+        }
+        //padding
+        if (elementLength > ptr.getLength()) {
+            baseType.pad(ptr, elementLength, sortOrder);
+        }
+        int elementOffset = ptr.getOffset();
+        byte[] elementBytes = ptr.get();
+
+        byte[] newArray;
+        if (!baseType.isFixedWidth()) {
+            int offsetArrayPosition = Bytes.toInt(arrayBytes, offset + length - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT);
+            int offsetArrayLength = length - offsetArrayPosition - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE;
+            arrayLength = Math.abs(arrayLength);
+
+            //checks whether offset array consists of shorts or integers
+            boolean useInt = offsetArrayLength / arrayLength == Bytes.SIZEOF_INT;
+            boolean convertToInt = false;
+            int endElementPosition = getOffset(arrayBytes, arrayLength - 1, !useInt, offsetArrayPosition + offset) + elementLength + Bytes.SIZEOF_BYTE;
+            int newOffsetArrayPosition;
+            int lengthIncrease;
+            int firstNonNullElementPosition = 0;
+            int currentPosition = 0;
+            //handle the case where the prepended element is null
+            if (elementLength == 0) {
+                int nulls = 1;
+                //counts the number of nulls which are already at the beginning of the array
+                for (int index = 0; index < arrayLength; index++) {
+                    int currOffset = getOffset(arrayBytes, index, !useInt, offsetArrayPosition + offset);
+                    if (arrayBytes[offset + currOffset] == QueryConstants.SEPARATOR_BYTE) {
+                        nulls++;
+                    } else {
+                        //gets the offset of the first element after nulls at the beginning
+                        firstNonNullElementPosition = currOffset;
+                        break;
+                    }
+                }
+
+                int nMultiplesOver255 = nulls / 255;
+                int nRemainingNulls = nulls % 255;
+
+                //Calculates the increase in length due to prepending the null
+                //There is a length increase only when nRemainingNulls == 1
+                //nRemainingNulls == 1 and nMultiplesOver255 == 0 means there were no nulls at the beginning previously.
+                //In that case we need to increase the length by two bytes, one for the separator byte and one for the null count.
+                //ex: initial array - 65 0 66 0 0 0 after prepending null - 0 1(inverted) 65 0 66 0 0 0
+                //nRemainingNulls == 1 and nMultiplesOver255 != 0 means there were already nulls at the beginning.
+                //In this case prepending increases nMultiplesOver255 by 1,
+                //and we need one extra byte to store that increase.
+                //ex: initial array - 0 1 65 0 66 0 0 0 after prepending null - 0 1 1(inverted) 65 0 66 0 0 0
+                //nRemainingNulls == 0 case.
+                //ex: initial array - 0 254(inverted) 65 0 66 0 0 0 after prepending null - 0 1 65 0 66 0 0 0
+                //nRemainingNulls > 1 case.
+                //ex: initial array - 0 45(inverted) 65 0 66 0 0 0 after prepending null - 0 46(inverted) 65 0 66 0 0 0
+                lengthIncrease = nRemainingNulls == 1 ? (nMultiplesOver255 == 0 ? 2 * Bytes.SIZEOF_BYTE : Bytes.SIZEOF_BYTE) : 0;
+                endElementPosition = getOffset(arrayBytes, arrayLength - 1, !useInt, offsetArrayPosition + offset) + lengthIncrease;
+                if (!useInt) {
+                    if (PArrayDataType.useShortForOffsetArray(endElementPosition)) {
+                        newArray = new byte[length + Bytes.SIZEOF_SHORT + lengthIncrease];
+                    } else {
+                        newArray = new byte[length + arrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT + lengthIncrease];
+                        convertToInt = true;
+                    }
+                } else {
+                    newArray = new byte[length + Bytes.SIZEOF_INT + lengthIncrease];
+                }
+                newArray[currentPosition] = QueryConstants.SEPARATOR_BYTE;
+                currentPosition++;
+
+                newOffsetArrayPosition = offsetArrayPosition + lengthIncrease;
+                while (nMultiplesOver255-- > 0) {
+                    newArray[currentPosition] = (byte) 1;
+                    currentPosition++;
+                }
+                // Write a byte for the remaining null elements
+                if (nRemainingNulls > 0) {
+                    byte nNullByte = SortOrder.invert((byte) (nRemainingNulls - 1));
+                    newArray[currentPosition] = nNullByte; // Single byte for repeating nulls
+                    currentPosition++;
+                }
+            } else {
+                if (!useInt) {
+                    if (PArrayDataType.useShortForOffsetArray(endElementPosition)) {
+                        newArray = new byte[length + elementLength + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BYTE];
+                    } else {
+                        newArray = new byte[length + elementLength + arrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE];
+                        convertToInt = true;
+                    }
+                } else {
+                    newArray = new byte[length + elementLength + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE];
+                }
+                newOffsetArrayPosition = offsetArrayPosition + Bytes.SIZEOF_BYTE + elementLength;
+
+                lengthIncrease = elementLength + Bytes.SIZEOF_BYTE;
+                System.arraycopy(elementBytes, elementOffset, newArray, 0, elementLength);
+                currentPosition += elementLength + Bytes.SIZEOF_BYTE;
+            }
+
+            System.arraycopy(arrayBytes, firstNonNullElementPosition + offset, newArray, currentPosition, offsetArrayPosition);
+
+            arrayLength = arrayLength + 1;
+            //writes the new offset and changes the previous offsets
+            if (useInt || convertToInt) {
+                writeNewOffsets(arrayBytes, newArray, false, !useInt, newOffsetArrayPosition, arrayLength, offsetArrayPosition, offset, lengthIncrease, length);
+            } else {
+                writeNewOffsets(arrayBytes, newArray, true, true, newOffsetArrayPosition, arrayLength, offsetArrayPosition, offset, lengthIncrease, length);
+            }
+        } else {
+            newArray = new byte[length + elementLength];
+
+            System.arraycopy(elementBytes, elementOffset, newArray, 0, elementLength);
+            System.arraycopy(arrayBytes, offset, newArray, elementLength, length);
+        }
+
+        ptr.set(newArray);
+        return true;
+    }
+
+    private static void writeNewOffsets(byte[] arrayBytes, byte[] newArray, boolean useShortNew, boolean useShortPrevious, int newOffsetArrayPosition, int arrayLength, int offsetArrayPosition, int offset, int offsetShift, int length) {
+        int currentPosition = newOffsetArrayPosition;
+        int offsetArrayElementSize = useShortNew ? Bytes.SIZEOF_SHORT : Bytes.SIZEOF_INT;
+        if (useShortNew) {
+            Bytes.putShort(newArray, currentPosition, (short) (0 - Short.MAX_VALUE));
+        } else {
+            Bytes.putInt(newArray, currentPosition, 0);
+        }
+
+        currentPosition += offsetArrayElementSize;
+        boolean nullsAtBeginning = true;
+        for (int arrayIndex = 0; arrayIndex < arrayLength - 1; arrayIndex++) {
+            int oldOffset = getOffset(arrayBytes, arrayIndex, useShortPrevious, offsetArrayPosition + offset);
+            if (arrayBytes[offset + oldOffset] == QueryConstants.SEPARATOR_BYTE && nullsAtBeginning) {
+                if (useShortNew) {
+                    Bytes.putShort(newArray, currentPosition, (short) (oldOffset - Short.MAX_VALUE));
+                } else {
+                    Bytes.putInt(newArray, currentPosition, oldOffset);
+                }
+            } else {
+                if (useShortNew) {
+                    Bytes.putShort(newArray, currentPosition, (short) (oldOffset + offsetShift - Short.MAX_VALUE));
+                } else {
+                    Bytes.putInt(newArray, currentPosition, oldOffset + offsetShift);
+                }
+                nullsAtBeginning = false;
+            }
+            currentPosition += offsetArrayElementSize;
+        }
+
+        Bytes.putInt(newArray, currentPosition, newOffsetArrayPosition);
+        currentPosition += Bytes.SIZEOF_INT;
+        Bytes.putInt(newArray, currentPosition, useShortNew ? arrayLength : -arrayLength);
+        currentPosition += Bytes.SIZEOF_INT;
+        Bytes.putByte(newArray, currentPosition, arrayBytes[offset + length - 1]);
+    }
+
     public static int serailizeOffsetArrayIntoStream(DataOutputStream oStream, TrustedByteArrayOutputStream byteStream,
             int noOfElements, int maxOffset, int[] offsetPos) throws IOException {
         int offsetPosition = (byteStream.size());
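
The leading-null header that prependItemToArray maintains packs a run of nulls as a separator byte, one byte per full multiple of 255, and an inverted remainder byte. A standalone sketch of just that header, assuming QueryConstants.SEPARATOR_BYTE is 0 and SortOrder.invert is a bitwise complement, as in Phoenix:

    import java.io.ByteArrayOutputStream;

    public class NullHeaderSketch {
        // Builds the header that prependItemToArray writes for n leading nulls.
        public static byte[] nullHeader(int nulls) {
            int nMultiplesOver255 = nulls / 255;
            int nRemainingNulls = nulls % 255;
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            out.write(0); // separator byte (assumed QueryConstants.SEPARATOR_BYTE == 0)
            for (int i = 0; i < nMultiplesOver255; i++) {
                out.write(1); // one byte per full run of 255 nulls
            }
            if (nRemainingNulls > 0) {
                // inverted remainder, matching SortOrder.invert((byte) (nRemainingNulls - 1))
                out.write((byte) ~(nRemainingNulls - 1));
            }
            return out.toByteArray();
        }
    }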

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5ef25c9/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java
new file mode 100644
index 0000000..4d2f960
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java
@@ -0,0 +1,552 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.Calendar;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ArrayPrependFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.*;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class ArrayPrependFunctionTest {
+
+    private static void testExpression(LiteralExpression array, LiteralExpression element, PhoenixArray expected)
+            throws SQLException {
+        List<Expression> expressions = Lists.newArrayList((Expression) element);
+        expressions.add(array);
+
+        Expression arrayPrependFunction = new ArrayPrependFunction(expressions);
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        arrayPrependFunction.evaluate(null, ptr);
+        PhoenixArray result = (PhoenixArray) arrayPrependFunction.getDataType().toObject(ptr, expressions.get(1).getSortOrder(), array.getMaxLength(), array.getScale());
+        assertEquals(result, expected);
+    }
+
+    private static void test(PhoenixArray array, Object element, PDataType arrayDataType, Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) throws SQLException {
+        LiteralExpression arrayLiteral, elementLiteral;
+        arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, arraySortOrder, Determinism.ALWAYS);
+        elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, elementSortOrder, Determinism.ALWAYS);
+        testExpression(arrayLiteral, elementLiteral, expected);
+    }
+
+    @Test
+    public void testArrayPrependFunction1() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{5, 1, 2, -3, 4};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction2() throws Exception {
+        Object[] o = new Object[]{"1", "2", "3", "4"};
+        Object[] o2 = new Object[]{"56", "1", "2", "3", "4"};
+        Object element = "56";
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction3() throws Exception {
+        //offset array short to int transition
+        Object[] o = new Object[Short.MAX_VALUE + 1];
+        for (int i = 0; i < o.length; i++) {
+            o[i] = "a";
+        }
+        Object[] o2 = new Object[Short.MAX_VALUE + 2];
+        for (int i = 1; i < o2.length; i++) {
+            o2[i] = "a";
+        }
+        Object element = "b";
+        o2[0] = element;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction4() throws Exception {
+        //offset array int
+        Object[] o = new Object[Short.MAX_VALUE + 7];
+        for (int i = 0; i < o.length; i++) {
+            o[i] = "a";
+        }
+        Object[] o2 = new Object[Short.MAX_VALUE + 8];
+        for (int i = 1; i < o2.length; i++) {
+            o2[i] = "a";
+        }
+        Object element = "b";
+        o2[0] = element;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunctionBoolean() throws Exception {
+        Boolean[] o = new Boolean[]{true, false, false, true};
+        Boolean[] o2 = new Boolean[]{false, true, false, false, true};
+        Boolean element = false;
+        PDataType baseType = PBoolean.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE),
+                null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction6() throws Exception {
+        Object[] o = new Object[]{new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3)};
+        Object[] o2 = new Object[]{new Float(8.9), new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3)};
+        Object element = 8.9;
+        PDataType baseType = PFloat.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction7() throws Exception {
+        Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE};
+        Object[] o2 = new Object[]{12.67, 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE};
+        Object element = 12.67;
+        PDataType baseType = PDouble.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction8() throws Exception {
+        Object[] o = new Object[]{123l, 677l, 98789l, -78989l, 66787l};
+        Object[] o2 = new Object[]{543l, 123l, 677l, 98789l, -78989l, 66787l};
+        Object element = 543l;
+        PDataType baseType = PLong.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction9() throws Exception {
+        Object[] o = new Object[]{(short) 34, (short) -23, (short) -89, (short) 999, (short) 34};
+        Object[] o2 = new Object[]{(short) 7, (short) 34, (short) -23, (short) -89, (short) 999, (short) 34};
+        Object element = (short) 7;
+        PDataType baseType = PSmallint.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction10() throws Exception {
+        Object[] o = new Object[]{(byte) 4, (byte) 8, (byte) 9};
+        Object[] o2 = new Object[]{(byte) 6, (byte) 4, (byte) 8, (byte) 9};
+        Object element = (byte) 6;
+        PDataType baseType = PTinyint.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction11() throws Exception {
+        Object[] o = new Object[]{BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785)};
+        Object[] o2 = new Object[]{BigDecimal.valueOf(-19), BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785)};
+        Object element = BigDecimal.valueOf(-19);
+        PDataType baseType = PDecimal.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction12() throws Exception {
+        Calendar calendar = Calendar.getInstance();
+        java.util.Date currentDate = calendar.getTime();
+        java.sql.Date date = new java.sql.Date(currentDate.getTime());
+
+        Object[] o = new Object[]{date, date, date};
+        Object[] o2 = new Object[]{date, date, date, date};
+        PDataType baseType = PDate.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction13() throws Exception {
+        Calendar calendar = Calendar.getInstance();
+        java.util.Date currentDate = calendar.getTime();
+        java.sql.Time time = new java.sql.Time(currentDate.getTime());
+
+        Object[] o = new Object[]{time, time, time};
+        Object[] o2 = new Object[]{time, time, time, time};
+        PDataType baseType = PTime.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction14() throws Exception {
+        Calendar calendar = Calendar.getInstance();
+        java.util.Date currentDate = calendar.getTime();
+        java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime());
+
+        Object[] o = new Object[]{timestamp, timestamp, timestamp};
+        Object[] o2 = new Object[]{timestamp, timestamp, timestamp, timestamp};
+        PDataType baseType = PTimestamp.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction15() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{5, 1, 2, -3, 4};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction16() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{5, 1, 2, -3, 4};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction17() throws Exception {
+        Object[] o = new Object[]{1, 2, -3, 4};
+        Object[] o2 = new Object[]{5, 1, 2, -3, 4};
+        Object element = 5;
+        PDataType baseType = PInteger.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction18() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"5", "1", "2", "3", "4"};
+        Object element = "5";
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction19() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"5", "1", "2", "3", "4"};
+        Object element = "5";
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.ASC);
+    }
+
+    @Test
+    public void testArrayPrependFunction20() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"5", "1", "2", "3", "4"};
+        Object element = "5";
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction21() throws Exception {
+        Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE};
+        Object[] o2 = new Object[]{12.67, 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE};
+        Object element = 12.67;
+        PDataType baseType = PDouble.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction22() throws Exception {
+        byte[][] o = new byte[][]{new byte[]{2, 0, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}};
+        byte[][] o2 = new byte[][]{new byte[]{5, 6}, new byte[]{2, 0, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}};
+        byte[] element = new byte[]{5, 6};
+        PDataType baseType = PVarbinary.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction23() throws Exception {
+        byte[][] o = new byte[][]{new byte[]{2, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}};
+        byte[][] o2 = new byte[][]{new byte[]{5, 6}, new byte[]{2, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}};
+        byte[] element = new byte[]{5, 6};
+        PDataType baseType = PBinary.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 2, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testArrayPrependFunction24() throws Exception {
+        byte[][] o = new byte[][]{new byte[]{2, 0}, new byte[]{13, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}};
+        byte[][] o2 = new byte[][]{new byte[]{5, 6}, new byte[]{2, 0}, new byte[]{13, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}};
+        byte[] element = new byte[]{5, 6};
+        PDataType baseType = PBinary.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 3, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWithNoNullsAtBeginning() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{"1", "2", "3", "4"};
+        Object element = null;
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsAllNulls() throws Exception {
+        Object element = null;
+        PDataType baseType = PChar.INSTANCE;
+
+        PhoenixArray arr = null;
+        PhoenixArray expected = null;
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith268NullsAtBeginning() throws Exception {
+        //268 nulls at the beginning
+        Object[] o = new Object[270];
+        for (int i = 0; i < o.length - 2; i++)
+            o[i] = null;
+
+        o[o.length - 2] = "1";
+        o[o.length - 1] = "2";
+
+        Object[] o2 = new Object[271];
+        for (int i = 0; i < o2.length - 2; i++)
+            o2[i] = null;
+
+        o2[o2.length - 2] = "1";
+        o2[o2.length - 1] = "2";
+
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith241NullsAtBeginning() throws Exception {
+        //241 nulls at the beginning
+        Object[] o = new Object[243];
+        for (int i = 0; i < o.length - 2; i++)
+            o[i] = null;
+
+        o[o.length - 2] = "1";
+        o[o.length - 1] = "2";
+
+        Object[] o2 = new Object[244];
+        for (int i = 0; i < o2.length - 2; i++)
+            o2[i] = null;
+
+        o2[o2.length - 2] = "1";
+        o2[o2.length - 1] = "2";
+
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith254NullsAtBeginning() throws Exception {
+        //254 nulls at the beginning
+        Object[] o = new Object[256];
+        for (int i = 0; i < o.length - 2; i++)
+            o[i] = null;
+
+        o[o.length - 2] = "1";
+        o[o.length - 1] = "2";
+
+        Object[] o2 = new Object[257];
+        for (int i = 0; i < o2.length - 2; i++)
+            o2[i] = null;
+
+        o2[o2.length - 2] = "1";
+        o2[o2.length - 1] = "2";
+
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith510NullsAtBeginning() throws Exception {
+        //510 nulls at the beginning
+        Object[] o = new Object[512];
+        for (int i = 0; i < o.length - 2; i++)
+            o[i] = null;
+
+        o[o.length - 2] = "1";
+        o[o.length - 1] = "2";
+
+        Object[] o2 = new Object[513];
+        for (int i = 0; i < o2.length - 2; i++)
+            o2[i] = null;
+
+        o2[o2.length - 2] = "1";
+        o2[o2.length - 1] = "2";
+
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith509NullsAtBeginning() throws Exception {
+        //509 nulls at the beginning
+        Object[] o = new Object[511];
+        for (int i = 0; i < o.length - 2; i++)
+            o[i] = null;
+
+        o[o.length - 2] = "1";
+        o[o.length - 1] = "2";
+
+        Object[] o2 = new Object[512];
+        for (int i = 0; i < o2.length - 2; i++)
+            o2[i] = null;
+
+        o2[o2.length - 2] = "1";
+        o2[o2.length - 1] = "2";
+
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith1NullAtBeginning() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{null, "1   ", "2   ", "3   ", "4   "};
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWith2NullsAtBeginning() throws Exception {
+        Object[] o = new Object[]{null, "1   ", "2   ", "3   ", "4   "};
+        Object[] o2 = new Object[]{null, null, "1   ", "2   ", "3   ", "4   "};
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+
+    @Test
+    public void testForNullsWithNullsInMiddle() throws Exception {
+        Object[] o = new Object[]{"1   ", "2   ", null, "3   ", "4   "};
+        Object[] o2 = new Object[]{null, "1   ", "2   ", null, "3   ", "4   "};
+        Object element = null;
+        PDataType baseType = PVarchar.INSTANCE;
+
+        PhoenixArray arr = new PhoenixArray(baseType, o);
+        PhoenixArray expected = new PhoenixArray(baseType, o2);
+        test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+    }
+}


[46/50] [abbrv] phoenix git commit: PHOENIX-1968: Should support saving arrays

Posted by ma...@apache.org.
PHOENIX-1968: Should support saving arrays


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/31a1ca6c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/31a1ca6c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/31a1ca6c

Branch: refs/heads/calcite
Commit: 31a1ca6caefb45430969fc7c0d28b50bb515c605
Parents: db90196
Author: ravimagham <ra...@apache.org>
Authored: Thu Jun 11 11:50:21 2015 -0700
Committer: ravimagham <ra...@apache.org>
Committed: Thu Jun 11 11:50:21 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 21 ++++++++++++++++
 .../phoenix/spark/PhoenixRecordWritable.scala   | 25 ++++++++++++++++----
 2 files changed, 41 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/31a1ca6c/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 42e8676..5f256e6 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -415,4 +415,25 @@ class PhoenixSparkIT extends FunSuite with Matchers with BeforeAndAfterAll {
 
     results.toList shouldEqual checkResults
   }
+
+  test("Can save arrays back to phoenix") {
+    val dataSet = List((2L, Array("String1", "String2", "String3")))
+
+    sc
+      .parallelize(dataSet)
+      .saveToPhoenix(
+        "ARRAY_TEST_TABLE",
+        Seq("ID","VCARRAY"),
+        zkUrl = Some(quorumAddress)
+      )
+
+    // Load the results back
+    val stmt = conn.createStatement()
+    val rs = stmt.executeQuery("SELECT VCARRAY FROM ARRAY_TEST_TABLE WHERE ID = 2")
+    rs.next()
+    val sqlArray = rs.getArray(1).getArray().asInstanceOf[Array[String]]
+
+    // Verify the arrays are equal
+    sqlArray shouldEqual dataSet(0)._2
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/31a1ca6c/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
----------------------------------------------------------------------
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
index 67e0bd2..3977657 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
@@ -16,11 +16,12 @@ package org.apache.phoenix.spark
 import java.sql.{PreparedStatement, ResultSet}
 import org.apache.hadoop.mapreduce.lib.db.DBWritable
 import org.apache.phoenix.mapreduce.util.ColumnInfoToStringEncoderDecoder
-import org.apache.phoenix.schema.types.{PDate, PhoenixArray}
+import org.apache.phoenix.schema.types.{PDataType, PDate, PhoenixArray}
 import org.joda.time.DateTime
 import scala.collection.{immutable, mutable}
 import scala.collection.JavaConversions._
 
+
 class PhoenixRecordWritable(var encodedColumns: String) extends DBWritable {
   val upsertValues = mutable.ArrayBuffer[Any]()
   val resultMap = mutable.Map[String, AnyRef]()
@@ -44,13 +45,27 @@ class PhoenixRecordWritable(var encodedColumns: String) extends DBWritable {
     upsertValues.zip(columns).zipWithIndex.foreach {
       case ((v, c), i) => {
         if (v != null) {
+
           // Both Java and Joda dates used to work in 4.2.3, but now they must be java.sql.Date
+          // Can override any other types here as needed
           val (finalObj, finalType) = v match {
-            case dt: DateTime => (new java.sql.Date(dt.getMillis), PDate.INSTANCE.getSqlType)
-            case d: java.util.Date => (new java.sql.Date(d.getTime), PDate.INSTANCE.getSqlType)
-            case _ => (v, c.getSqlType)
+            case dt: DateTime => (new java.sql.Date(dt.getMillis), PDate.INSTANCE)
+            case d: java.util.Date => (new java.sql.Date(d.getTime), PDate.INSTANCE)
+            case _ => (v, c.getPDataType)
+          }
+
+          // Save as array or object
+          finalObj match {
+            case obj: Array[AnyRef] => {
+              // Create a java.sql.Array; we need to look up the base SQL type name
+              val sqlArray = statement.getConnection.createArrayOf(
+                PDataType.arrayBaseType(finalType).getSqlTypeName,
+                obj
+              )
+              statement.setArray(i + 1, sqlArray)
+            }
+            case _ => statement.setObject(i + 1, finalObj)
           }
-          statement.setObject(i + 1, finalObj, finalType)
         } else {
           statement.setNull(i + 1, c.getSqlType)
         }


[07/50] [abbrv] phoenix git commit: PHOENIX-1880 Connections from QueryUtil.getConnection don't work on secure clusters (Geoffrey Jacoby)

Posted by ma...@apache.org.
PHOENIX-1880 Connections from QueryUtil.getConnection don't work on secure clusters (Geoffrey Jacoby)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1fa09dc5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1fa09dc5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1fa09dc5

Branch: refs/heads/calcite
Commit: 1fa09dc5c84ca92f57f6904bf88628133eb65995
Parents: d223f2c
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 30 13:39:23 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 30 13:39:23 2015 -0700

----------------------------------------------------------------------
 .../phoenix/mapreduce/util/ConnectionUtil.java  | 23 +++-----------------
 .../org/apache/phoenix/util/PropertiesUtil.java | 22 +++++++++++++++++++
 .../java/org/apache/phoenix/util/QueryUtil.java |  4 ++++
 .../apache/phoenix/util/PropertiesUtilTest.java | 23 +++++++++++++++++++-
 4 files changed, 51 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1fa09dc5/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
index e677104..294d4e9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java
@@ -26,6 +26,7 @@ import java.util.Properties;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 
 /**
@@ -54,7 +55,7 @@ public class ConnectionUtil {
     public static Connection getInputConnection(final Configuration conf , final Properties props) throws SQLException {
         Preconditions.checkNotNull(conf);
         return getConnection(PhoenixConfigurationUtil.getInputCluster(conf),
-                extractProperties(props, conf));
+                PropertiesUtil.extractProperties(props, conf));
     }
 
     /**
@@ -77,7 +78,7 @@ public class ConnectionUtil {
     public static Connection getOutputConnection(final Configuration conf, Properties props) throws SQLException {
         Preconditions.checkNotNull(conf);
         return getConnection(PhoenixConfigurationUtil.getOutputCluster(conf),
-                extractProperties(props, conf));
+                PropertiesUtil.extractProperties(props, conf));
     }
 
     /**
@@ -91,22 +92,4 @@ public class ConnectionUtil {
         return DriverManager.getConnection(QueryUtil.getUrl(quorum), props);
     }
 
-    /**
-     * Add properties from the given Configuration to the provided Properties.
-     *
-     * @param props properties to which connection information from the Configuration will be added
-     * @param conf configuration containing connection information
-     * @return the input Properties value, with additional connection information from the
-     * given Configuration
-     */
-    private static Properties extractProperties(Properties props, final Configuration conf) {
-        Iterator<Map.Entry<String, String>> iterator = conf.iterator();
-        if(iterator != null) {
-            while (iterator.hasNext()) {
-                Map.Entry<String, String> entry = iterator.next();
-                props.setProperty(entry.getKey(), entry.getValue());
-            }
-        }
-        return props;
-    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1fa09dc5/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
index d894e58..bcb9aa4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PropertiesUtil.java
@@ -17,7 +17,10 @@
  */
 package org.apache.phoenix.util;
 
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Properties;
+import org.apache.hadoop.conf.Configuration;
 
 public class PropertiesUtil {
 
@@ -36,4 +39,23 @@ public class PropertiesUtil {
         }
         return newProperties;
     }
+    
+     /**
+     * Add properties from the given Configuration to the provided Properties.
+     *
+     * @param props properties to which connection information from the Configuration will be added
+     * @param conf configuration containing connection information
+     * @return the input Properties value, with additional connection information from the
+     * given Configuration
+     */
+    public static Properties extractProperties(Properties props, final Configuration conf) {
+        Iterator<Map.Entry<String, String>> iterator = conf.iterator();
+        if(iterator != null) {
+            while (iterator.hasNext()) {
+                Map.Entry<String, String> entry = iterator.next();
+                props.setProperty(entry.getKey(), entry.getValue());
+            }
+        }
+        return props;
+    }
 }
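
Behaviorally, extractProperties copies every entry of the Configuration into the supplied Properties and returns them, so anything set in hbase-site.xml rides along to the JDBC layer. A minimal usage sketch (the key shown is just HBase's standard quorum key):

    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.util.PropertiesUtil;

    public class ExtractPropertiesSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create(); // loads hbase-site.xml
            Properties props = new Properties();
            PropertiesUtil.extractProperties(props, conf);
            // Every Configuration entry is now visible as a JDBC property,
            // e.g. the ZooKeeper quorum:
            System.out.println(props.getProperty("hbase.zookeeper.quorum"));
        }
    }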

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1fa09dc5/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index 993016a..d63a68f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -49,6 +49,8 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import java.util.Iterator;
+import java.util.Map;
 
 public final class QueryUtil {
 
@@ -266,6 +268,7 @@ public final class QueryUtil {
             SQLException {
         String url = getConnectionUrl(props, conf);
         LOG.info("Creating connection with the jdbc url:" + url);
+        PropertiesUtil.extractProperties(props, conf);
         return DriverManager.getConnection(url, props);
     }
 
@@ -316,4 +319,5 @@ public final class QueryUtil {
                 ("\"" + tableName + "\" ") +
                 (WHERE + " " + where);
     }
+    
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1fa09dc5/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
index b4608a2..17adfcb 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java
@@ -21,13 +21,18 @@ import static org.junit.Assert.assertEquals;
 
 import java.sql.SQLException;
 import java.util.Properties;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 
 import org.junit.Test;
 
 public class PropertiesUtilTest {
 
     private static final String SOME_TENANT_ID = "00Dxx0000001234";
-
+    private static final String SOME_OTHER_PROPERTY_KEY = "some_other_property";
+    private static final String SOME_OTHER_PROPERTY_VALUE = "some_other_value";
+    
     @Test
     public void testCopy() throws Exception{
         final Properties propsWithTenant = new Properties();
@@ -44,6 +49,22 @@ public class PropertiesUtilTest {
         verifyValidCopy(new Properties(propsWithTenant));
     }
 
+    @Test
+    public void testCopyFromConfiguration() throws Exception{
+        //make sure that we don't only copy the ZK quorum, but all
+        //properties
+        final Configuration conf = HBaseConfiguration.create();
+        final Properties props = new Properties();
+        
+        conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
+        conf.set(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY, 
+                PropertiesUtilTest.SOME_OTHER_PROPERTY_VALUE);
+        PropertiesUtil.extractProperties(props, conf);
+        assertEquals(props.getProperty(HConstants.ZOOKEEPER_QUORUM),
+                conf.get(HConstants.ZOOKEEPER_QUORUM));
+        assertEquals(props.getProperty(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY),
+                conf.get(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY));
+    }
     private void verifyValidCopy(Properties props) throws SQLException {
 
         Properties copy = PropertiesUtil.deepCopy(props);
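
The net effect on secure clusters: the MapReduce connection helpers now funnel the full Configuration, including any Kerberos/security settings from hbase-site.xml, into the JDBC properties. A sketch of the intended call path, using only the signatures shown in the hunks above and assuming an already-authenticated client:

    import java.sql.Connection;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.mapreduce.util.ConnectionUtil;

    public class SecureInputConnectionSketch {
        public static void main(String[] args) throws Exception {
            // Security-related keys from hbase-site.xml ride along via
            // PropertiesUtil.extractProperties(props, conf).
            Configuration conf = HBaseConfiguration.create();
            Connection conn = ConnectionUtil.getInputConnection(conf, new Properties());
            // ... run queries ...
            conn.close();
        }
    }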


[27/50] [abbrv] phoenix git commit: PHOENIX-1681 Use the new Region Interface (Andrew Purtell)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 272cac6..e7e1dd7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -111,7 +111,7 @@ public class StatisticsCollector {
         this.statsTable.close();
     }
 
-    public void updateStatistic(HRegion region) {
+    public void updateStatistic(Region region) {
         try {
             ArrayList<Mutation> mutations = new ArrayList<Mutation>();
             writeStatsToStatsTable(region, true, mutations, TimeKeeper.SYSTEM.getCurrentTime());
@@ -126,7 +126,7 @@ public class StatisticsCollector {
         }
     }
     
-    private void writeStatsToStatsTable(final HRegion region,
+    private void writeStatsToStatsTable(final Region region,
             boolean delete, List<Mutation> mutations, long currentTime) throws IOException {
         try {
             // update the statistics table
@@ -215,7 +215,7 @@ public class StatisticsCollector {
         }
     }
 
-    public InternalScanner createCompactionScanner(HRegion region, Store store, InternalScanner s) throws IOException {
+    public InternalScanner createCompactionScanner(Region region, Store store, InternalScanner s) throws IOException {
         // See if this is for Major compaction
         if (logger.isDebugEnabled()) {
             logger.debug("Compaction scanner created for stats");
@@ -224,13 +224,13 @@ public class StatisticsCollector {
         return getInternalScanner(region, store, s, cfKey);
     }
 
-    public void splitStats(HRegion parent, HRegion left, HRegion right) {
+    public void splitStats(Region parent, Region left, Region right) {
         try {
             if (logger.isDebugEnabled()) {
                 logger.debug("Collecting stats for split of " + parent.getRegionInfo() + " into " + left.getRegionInfo() + " and " + right.getRegionInfo());
             }
             List<Mutation> mutations = Lists.newArrayListWithExpectedSize(3);
-            for (byte[] fam : parent.getStores().keySet()) {
+            for (byte[] fam : parent.getTableDesc().getFamiliesKeys()) {
             	statsTable.splitStats(parent, left, right, this, new ImmutableBytesPtr(fam), mutations);
             }
             if (logger.isDebugEnabled()) {
@@ -243,7 +243,7 @@ public class StatisticsCollector {
         }
     }
 
-    protected InternalScanner getInternalScanner(HRegion region, Store store,
+    protected InternalScanner getInternalScanner(Region region, Store store,
             InternalScanner internalScan, ImmutableBytesPtr family) {
         return new StatisticsScanner(this, statsTable, region, internalScan, family);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 0e50923..582c4de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -26,9 +26,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 /**
@@ -38,11 +38,11 @@ public class StatisticsScanner implements InternalScanner {
     private static final Log LOG = LogFactory.getLog(StatisticsScanner.class);
     private InternalScanner delegate;
     private StatisticsWriter stats;
-    private HRegion region;
+    private Region region;
     private StatisticsCollector tracker;
     private ImmutableBytesPtr family;
 
-    public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, HRegion region,
+    public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, Region region,
             InternalScanner delegate, ImmutableBytesPtr family) {
         this.tracker = tracker;
         this.stats = stats;
@@ -85,17 +85,17 @@ public class StatisticsScanner implements InternalScanner {
             // Just verify if this if fine
             ArrayList<Mutation> mutations = new ArrayList<Mutation>();
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Deleting the stats for the region " + region.getRegionNameAsString()
+                LOG.debug("Deleting the stats for the region " + region.getRegionInfo().getRegionNameAsString()
                         + " as part of major compaction");
             }
-            stats.deleteStats(region.getRegionName(), this.tracker, family, mutations);
+            stats.deleteStats(region.getRegionInfo().getRegionName(), this.tracker, family, mutations);
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Adding new stats for the region " + region.getRegionNameAsString()
+                LOG.debug("Adding new stats for the region " + region.getRegionInfo().getRegionNameAsString()
                         + " as part of major compaction");
             }
-            stats.addStats(region.getRegionName(), this.tracker, family, mutations);
+            stats.addStats(region.getRegionInfo().getRegionName(), this.tracker, family, mutations);
             if (LOG.isDebugEnabled()) {
-                LOG.debug("Committing new stats for the region " + region.getRegionNameAsString()
+                LOG.debug("Committing new stats for the region " + region.getRegionInfo().getRegionNameAsString()
                         + " as part of major compaction");
             }
             stats.commitStats(mutations);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 8756568..834675c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -105,7 +105,7 @@ public class StatisticsWriter implements Closeable {
         statsWriterTable.close();
     }
 
-    public void splitStats(HRegion p, HRegion l, HRegion r, StatisticsCollector tracker, ImmutableBytesPtr cfKey,
+    public void splitStats(Region p, Region l, Region r, StatisticsCollector tracker, ImmutableBytesPtr cfKey,
             List<Mutation> mutations) throws IOException {
         if (tracker == null) { return; }
         boolean useMaxTimeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP;
@@ -113,8 +113,8 @@ public class StatisticsWriter implements Closeable {
             mutations.add(getLastStatsUpdatedTimePut(clientTimeStamp));
         }
         long readTimeStamp = useMaxTimeStamp ? HConstants.LATEST_TIMESTAMP : clientTimeStamp;
-        Result result = StatisticsUtil.readRegionStatistics(statsReaderTable, tableName, cfKey, p.getRegionName(),
-                readTimeStamp);
+        Result result = StatisticsUtil.readRegionStatistics(statsReaderTable, tableName, cfKey, 
+                p.getRegionInfo().getRegionName(), readTimeStamp);
         byte[] minKey = HConstants.EMPTY_BYTE_ARRAY;
         if (result != null && !result.isEmpty()) {
         	Cell cell = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
@@ -133,13 +133,13 @@ public class StatisticsWriter implements Closeable {
 
                 GuidePostsInfo guidePostsRegionInfo = GuidePostsInfo.deserializeGuidePostsInfo(cell.getValueArray(),
                         cell.getValueOffset(), cell.getValueLength(), rowCount);
-                byte[] pPrefix = StatisticsUtil.getRowKey(tableName, cfKey, p.getRegionName());
+                byte[] pPrefix = StatisticsUtil.getRowKey(tableName, cfKey, p.getRegionInfo().getRegionName());
                 mutations.add(new Delete(pPrefix, writeTimeStamp));
                 
 	        	long byteSize = 0;
                 Cell byteSizeCell = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                         PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);                
-                int index = Collections.binarySearch(guidePostsRegionInfo.getGuidePosts(), r.getStartKey(),
+                int index = Collections.binarySearch(guidePostsRegionInfo.getGuidePosts(), r.getRegionInfo().getStartKey(),
                         Bytes.BYTES_COMPARATOR);
                 int size = guidePostsRegionInfo.getGuidePosts().size();
                 int midEndIndex, midStartIndex;
@@ -175,7 +175,7 @@ public class StatisticsWriter implements Closeable {
                     tracker.clear();
                     tracker.addGuidePost(cfKey, lguidePosts, leftByteCount, cell.getTimestamp(),
                         minKey);
-	                addStats(l.getRegionName(), tracker, cfKey, mutations);
+	                addStats(l.getRegionInfo().getRegionName(), tracker, cfKey, mutations);
 	            }
 	            if (midStartIndex < size) {
                     GuidePostsInfo rguidePosts =
@@ -184,7 +184,7 @@ public class StatisticsWriter implements Closeable {
 	                tracker.clear();
                     tracker.addGuidePost(cfKey, rguidePosts, rightByteCount, cell.getTimestamp(),
                         guidePostsRegionInfo.getGuidePosts().get(midStartIndex));
-	                addStats(r.getRegionName(), tracker, cfKey, mutations);
+	                addStats(r.getRegionInfo().getRegionName(), tracker, cfKey, mutations);
 	            }
         	}
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index ca25348..3bf6f23 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
@@ -305,47 +305,49 @@ public class IndexUtil {
         });
     }
 
-    public static HRegion getIndexRegion(RegionCoprocessorEnvironment environment)
+    public static Region getIndexRegion(RegionCoprocessorEnvironment environment)
             throws IOException {
-        HRegion dataRegion = environment.getRegion();
+        Region dataRegion = environment.getRegion();
         return getIndexRegion(dataRegion, environment.getRegionServerServices());
     }
 
-    public static HRegion
-            getIndexRegion(HRegion dataRegion, RegionServerCoprocessorEnvironment env)
+    public static Region
+            getIndexRegion(Region dataRegion, RegionServerCoprocessorEnvironment env)
                     throws IOException {
         return getIndexRegion(dataRegion, env.getRegionServerServices());
     }
 
-    public static HRegion getDataRegion(RegionCoprocessorEnvironment env) throws IOException {
-        HRegion indexRegion = env.getRegion();
+    public static Region getDataRegion(RegionCoprocessorEnvironment env) throws IOException {
+        Region indexRegion = env.getRegion();
         return getDataRegion(indexRegion, env.getRegionServerServices());
     }
 
-    public static HRegion
-            getDataRegion(HRegion indexRegion, RegionServerCoprocessorEnvironment env)
+    public static Region
+            getDataRegion(Region indexRegion, RegionServerCoprocessorEnvironment env)
                     throws IOException {
         return getDataRegion(indexRegion, env.getRegionServerServices());
     }
 
-    public static HRegion getIndexRegion(HRegion dataRegion, RegionServerServices rss) throws IOException {
+    public static Region getIndexRegion(Region dataRegion, RegionServerServices rss) throws IOException {
         TableName indexTableName =
                 TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(dataRegion.getTableDesc()
                         .getName()));
-        List<HRegion> onlineRegions = rss.getOnlineRegions(indexTableName);
-        for(HRegion indexRegion : onlineRegions) {
-            if (Bytes.compareTo(dataRegion.getStartKey(), indexRegion.getStartKey()) == 0) {
+        List<Region> onlineRegions = rss.getOnlineRegions(indexTableName);
+        for(Region indexRegion : onlineRegions) {
+            if (Bytes.compareTo(dataRegion.getRegionInfo().getStartKey(),
+                    indexRegion.getRegionInfo().getStartKey()) == 0) {
                 return indexRegion;
             }
         }
         return null;
     }
 
-    public static HRegion getDataRegion(HRegion indexRegion, RegionServerServices rss) throws IOException {
+    public static Region getDataRegion(Region indexRegion, RegionServerServices rss) throws IOException {
         TableName dataTableName = TableName.valueOf(MetaDataUtil.getUserTableName(indexRegion.getTableDesc().getNameAsString()));
-        List<HRegion> onlineRegions = rss.getOnlineRegions(dataTableName);
-        for(HRegion region : onlineRegions) {
-            if (Bytes.compareTo(indexRegion.getStartKey(), region.getStartKey()) == 0) {
+        List<Region> onlineRegions = rss.getOnlineRegions(dataTableName);
+        for(Region region : onlineRegions) {
+            if (Bytes.compareTo(indexRegion.getRegionInfo().getStartKey(),
+                    region.getRegionInfo().getStartKey()) == 0) {
                 return region;
             }
         }
@@ -466,7 +468,7 @@ public class IndexUtil {
     
     public static void wrapResultUsingOffset(final ObserverContext<RegionCoprocessorEnvironment> c,
             List<Cell> result, final int offset, ColumnReference[] dataColumns,
-            TupleProjector tupleProjector, HRegion dataRegion, IndexMaintainer indexMaintainer,
+            TupleProjector tupleProjector, Region dataRegion, IndexMaintainer indexMaintainer,
             byte[][] viewConstants, ImmutableBytesWritable ptr) throws IOException {
         if (tupleProjector != null) {
             // Join back to data table here by issuing a local get projecting

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
index e996b23..fa8bd85 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -65,7 +65,7 @@ public class TestLocalTableState {
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
     Mockito.when(env.getConfiguration()).thenReturn(conf);
 
-    HRegion region = Mockito.mock(HRegion.class);
+    Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     RegionScanner scanner = Mockito.mock(RegionScanner.class);
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
@@ -108,7 +108,7 @@ public class TestLocalTableState {
     // setup mocks
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
 
-    HRegion region = Mockito.mock(HRegion.class);
+    Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     RegionScanner scanner = Mockito.mock(RegionScanner.class);
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
@@ -154,7 +154,7 @@ public class TestLocalTableState {
     // setup mocks
     RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
 
-    HRegion region = Mockito.mock(HRegion.class);
+    Region region = Mockito.mock(Region.class);
     Mockito.when(env.getRegion()).thenReturn(region);
     RegionScanner scanner = Mockito.mock(RegionScanner.class);
     Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index ae577bd..b381e9f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -201,7 +201,7 @@ public class TestWALRecoveryCaching {
 
     // kill the server where the tables live - this should trigger distributed log splitting
     // find the regionserver that matches the passed server
-    List<HRegion> online = new ArrayList<HRegion>();
+    List<Region> online = new ArrayList<Region>();
     online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
       testTable.getTableName()));
     online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
@@ -267,9 +267,9 @@ public class TestWALRecoveryCaching {
    * @param table
    * @return
    */
-  private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
+  private List<Region> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
       byte[] table) {
-    List<HRegion> online = Collections.emptyList();
+    List<Region> online = Collections.emptyList();
     for (RegionServerThread rst : cluster.getRegionServerThreads()) {
       // if its the server we are going to kill, get the regions we want to reassign
       if (rst.getRegionServer().getServerName().equals(server)) {
@@ -305,14 +305,14 @@ public class TestWALRecoveryCaching {
       tryIndex = !tryIndex;
       for (ServerName server : servers) {
         // find the regionserver that matches the passed server
-        List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);
+        List<Region> online = getRegionsFromServerForTable(cluster, server, table);
 
         LOG.info("Shutting down and reassigning regions from " + server);
         cluster.stopRegionServer(server);
         cluster.waitForRegionServerToStop(server, TIMEOUT);
 
         // force reassign the regions from the table
-        for (HRegion region : online) {
+        for (Region region : online) {
           cluster.getMaster().assignRegion(region.getRegionInfo());
         }
 
@@ -363,10 +363,9 @@ public class TestWALRecoveryCaching {
 
   private Set<ServerName> getServersForTable(MiniHBaseCluster cluster, byte[] table)
       throws Exception {
-    List<HRegion> indexRegions = cluster.getRegions(table);
     Set<ServerName> indexServers = new HashSet<ServerName>();
-    for (HRegion region : indexRegions) {
-      indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionName()));
+    for (Region region : cluster.getRegions(table)) {
+      indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionInfo().getRegionName()));
     }
     return indexServers;
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index cd28627..35b607e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -65,8 +65,8 @@ public class TestPerRegionIndexWriteCache {
     p2.add(family, qual, val);
   }
 
-  HRegion r1;
-  HRegion r2;
+  HRegion r1; // FIXME: Uses private type
+  HRegion r2; // FIXME: Uses private type
   WAL wal;
 
   @SuppressWarnings("deprecation")
@@ -212,4 +212,4 @@ public class TestPerRegionIndexWriteCache {
     // references around to these edits and have a memory leak
     assertNull("Got an entry for a region we removed", cache.getEdits(r1));
   }
-}
\ No newline at end of file
+}
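
The translation applied throughout this commit is mechanical; summarizing the hunks above as a cheat sheet, with a compilable sketch of the recurring pattern:

    import org.apache.hadoop.hbase.regionserver.Region;

    final class RegionApiSketch {
        // HRegion call (old)             -> Region-interface equivalent (new)
        // region.getRegionName()         -> region.getRegionInfo().getRegionName()
        // region.getRegionNameAsString() -> region.getRegionInfo().getRegionNameAsString()
        // region.getStartKey()           -> region.getRegionInfo().getStartKey()
        // region.getStores().keySet()    -> region.getTableDesc().getFamiliesKeys()

        // Identity and key-range details now hang off RegionInfo:
        static byte[] startKeyOf(Region region) {
            return region.getRegionInfo().getStartKey();
        }
    }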


[24/50] [abbrv] phoenix git commit: PHOENIX-1984 Make INSTR 1-based instead of 0-based

Posted by ma...@apache.org.
PHOENIX-1984 Make INSTR 1-based instead of 0-based

Bring the functionality of the INSTR built-in function in line with other
SQL string functions, with string indexing starting at 1.

Signed-off-by: Gabriel Reid <ga...@ngdata.com>


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c2fed1da
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c2fed1da
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c2fed1da

Branch: refs/heads/calcite
Commit: c2fed1dac8305f489939fc18e47cd2c2a6c596d8
Parents: d3ff079
Author: NAVEEN MADHIRE <vm...@indiana.edu>
Authored: Mon May 18 22:14:57 2015 -0500
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu May 21 17:24:47 2015 +0200

----------------------------------------------------------------------
 .../apache/phoenix/end2end/InstrFunctionIT.java | 12 ++---
 .../expression/function/InstrFunction.java      |  2 +-
 .../expression/function/InstrFunctionTest.java  | 48 ++++++++++----------
 3 files changed, 31 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2fed1da/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
index 57c0661..b869ff4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
@@ -63,7 +63,7 @@ public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl());
         initTable(conn, "ASC", "abcdefghijkl","fgh");
         String queryToExecute = "SELECT INSTR(name, 'fgh') FROM SAMPLE";
-        testInstr(conn, queryToExecute, 5);
+        testInstr(conn, queryToExecute, 6);
     }
     
     @Test
@@ -71,7 +71,7 @@ public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl());
         initTable(conn, "DESC", "abcdefghijkl","fgh");
         String queryToExecute = "SELECT INSTR(name, 'fgh') FROM SAMPLE";
-        testInstr(conn, queryToExecute, 5);
+        testInstr(conn, queryToExecute, 6);
     }
     
     @Test
@@ -79,7 +79,7 @@ public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl());
         initTable(conn, "ASC", "abcde fghijkl","lmn");
         String queryToExecute = "SELECT INSTR(name, 'lmn') FROM SAMPLE";
-        testInstr(conn, queryToExecute, -1);
+        testInstr(conn, queryToExecute, 0);
     }
     
     @Test
@@ -87,7 +87,7 @@ public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl());
         initTable(conn, "DESC", "abcde fghijkl","lmn");
         String queryToExecute = "SELECT INSTR(name, 'lmn') FROM SAMPLE";
-        testInstr(conn, queryToExecute, -1);
+        testInstr(conn, queryToExecute, 0);
     }
 
     @Test
@@ -95,7 +95,7 @@ public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl());
         initTable(conn, "ASC", "AɚɦFGH","ɚɦ");
         String queryToExecute = "SELECT INSTR(name, 'ɚɦ') FROM SAMPLE";
-        testInstr(conn, queryToExecute, 1);
+        testInstr(conn, queryToExecute, 2);
     }
     
     @Test
@@ -103,7 +103,7 @@ public class InstrFunctionIT extends BaseHBaseManagedTimeIT {
         Connection conn = DriverManager.getConnection(getUrl());
         initTable(conn, "DESC", "AɚɦFGH","ɚɦ");
         String queryToExecute = "SELECT INSTR(name, 'ɚɦ') FROM SAMPLE";
-        testInstr(conn, queryToExecute, 1);
+        testInstr(conn, queryToExecute, 2);
     } 
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2fed1da/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
index 317d4b3..7a002f8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
@@ -82,7 +82,7 @@ public class InstrFunction extends ScalarFunction{
         
         String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(0).getSortOrder());
 
-        position = sourceStr.indexOf(strToSearch);
+        position = sourceStr.indexOf(strToSearch) + 1;
         ptr.set(PInteger.INSTANCE.toBytes(position));
         return true;
     }
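
Because String.indexOf returns -1 on a miss, the +1 maps "not found" to 0 and the first character to 1, matching the updated ITs above. A minimal sketch against the SAMPLE table the IT's initTable creates (table shape assumed from the test, with name = 'abcdefghijkl'):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class InstrSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            ResultSet rs = conn.createStatement()
                .executeQuery("SELECT INSTR(name, 'fgh') FROM SAMPLE");
            while (rs.next()) {
                System.out.println(rs.getInt(1)); // 6 (1-based); 0 would mean no match
            }
            conn.close();
        }
    }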

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2fed1da/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
index 603ad39..359d772 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java
@@ -49,59 +49,59 @@ public class InstrFunctionTest {
     
     @Test
     public void testInstrFunction() throws SQLException {
-        inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 5, SortOrder.ASC);
+        inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 6, SortOrder.ASC);
         
-        inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 5, SortOrder.DESC);
+        inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 6, SortOrder.DESC);
         
-        inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 5, SortOrder.ASC);
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 6, SortOrder.ASC);
         
-        inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 5, SortOrder.DESC);
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 6, SortOrder.DESC);
         
-        inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", -1, SortOrder.DESC);
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", 0, SortOrder.DESC);
         
-        inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", -1, SortOrder.ASC);
+        inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", 0, SortOrder.ASC);
         
-        inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 5, SortOrder.ASC);
+        inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 6, SortOrder.ASC);
         
-        inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 5, SortOrder.DESC);
+        inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 6, SortOrder.DESC);
         
-        inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 8, SortOrder.ASC);
+        inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 9, SortOrder.ASC);
         
-        inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 8, SortOrder.DESC);
+        inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 9, SortOrder.DESC);
         
-        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 5, SortOrder.ASC);
+        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 6, SortOrder.ASC);
         
-        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 5, SortOrder.DESC);
+        inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 6, SortOrder.DESC);
         
         inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, "", 0, SortOrder.ASC);
         
         inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, "", 0, SortOrder.DESC);
         
-        inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 0, SortOrder.ASC);
+        inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 1, SortOrder.ASC);
         
-        inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 0, SortOrder.DESC);
+        inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 1, SortOrder.DESC);
         
-        inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 2, SortOrder.ASC);
+        inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 3, SortOrder.ASC);
         
-        inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 2, SortOrder.DESC);
+        inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 3, SortOrder.DESC);
         
-        inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 4, SortOrder.ASC);
+        inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 5, SortOrder.ASC);
         
-        inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 4, SortOrder.DESC);
+        inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 5, SortOrder.DESC);
         
         //Tests for MultiByte Characters
         
-        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 1, SortOrder.ASC);
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 2, SortOrder.ASC);
         
-        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 1, SortOrder.DESC);
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 2, SortOrder.DESC);
         
-        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 2, SortOrder.ASC);
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 3, SortOrder.ASC);
         
-        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 2, SortOrder.DESC);
+        inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 3, SortOrder.DESC);
         
-        inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 2, SortOrder.ASC);
+        inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 3, SortOrder.ASC);
         
-        inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 2, SortOrder.DESC);
+        inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 3, SortOrder.DESC);
     }
     
 


[45/50] [abbrv] phoenix git commit: PHOENIX-2027 Subqueries with no data are raising IllegalStateException (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-2027 Subqueries with no data are raising IllegalStateException (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/db90196d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/db90196d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/db90196d

Branch: refs/heads/calcite
Commit: db90196dc2561a220fc376ce01a8ad1ba185bea8
Parents: b3ed60b
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Wed Jun 10 01:00:50 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Wed Jun 10 01:00:50 2015 +0530

----------------------------------------------------------------------
 .../apache/phoenix/end2end/SortMergeJoinIT.java | 54 ++++++++++++++++++++
 .../phoenix/execute/SortMergeJoinPlan.java      |  4 +-
 2 files changed, 56 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/db90196d/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
index 6f14a45..8b65ab3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
@@ -2658,5 +2658,59 @@ public class SortMergeJoinIT extends BaseHBaseManagedTimeIT {
         }
     }
 
+    @Test
+    public void testSubqueryWithoutData() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String GRAMMAR_TABLE = "CREATE TABLE IF NOT EXISTS GRAMMAR_TABLE (ID INTEGER PRIMARY KEY, " +
+                    "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," +
+                    "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + 
+                    "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + 
+                    "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + 
+                    "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + 
+                    "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))";
+
+            String LARGE_TABLE = "CREATE TABLE IF NOT EXISTS LARGE_TABLE (ID INTEGER PRIMARY KEY, " +
+                    "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," +
+                    "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + 
+                    "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + 
+                    "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + 
+                    "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + 
+                    "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))";
+
+            String SECONDARY_LARGE_TABLE = "CREATE TABLE IF NOT EXISTS SECONDARY_LARGE_TABLE (SEC_ID INTEGER PRIMARY KEY," +
+                    "sec_unsig_id UNSIGNED_INT, sec_big_id BIGINT, sec_usnig_long_id UNSIGNED_LONG, sec_tiny_id TINYINT," + 
+                    "sec_unsig_tiny_id UNSIGNED_TINYINT, sec_small_id SMALLINT, sec_unsig_small_id UNSIGNED_SMALLINT," + 
+                    "sec_float_id FLOAT, sec_unsig_float_id UNSIGNED_FLOAT, sec_double_id DOUBLE, sec_unsig_double_id UNSIGNED_DOUBLE," +
+                    "sec_decimal_id DECIMAL, sec_boolean_id BOOLEAN, sec_time_id TIME, sec_date_id DATE," +
+                    "sec_timestamp_id TIMESTAMP, sec_unsig_time_id TIME, sec_unsig_date_id DATE, sec_unsig_timestamp_id TIMESTAMP," +
+                    "sec_varchar_id VARCHAR (30), sec_char_id CHAR (30), sec_binary_id BINARY (100), sec_varbinary_id VARBINARY (100))";
+            createTestTable(getUrl(), GRAMMAR_TABLE);
+            createTestTable(getUrl(), LARGE_TABLE);
+            createTestTable(getUrl(), SECONDARY_LARGE_TABLE);
+
+            String ddl = "SELECT /*+USE_SORT_MERGE_JOIN*/ * FROM (SELECT ID, BIG_ID, DATE_ID FROM LARGE_TABLE AS A WHERE (A.ID % 5) = 0) AS A " +
+                    "INNER JOIN (SELECT SEC_ID, SEC_TINY_ID, SEC_UNSIG_FLOAT_ID FROM SECONDARY_LARGE_TABLE AS B WHERE (B.SEC_ID % 5) = 0) AS B " +     
+                    "ON A.ID=B.SEC_ID WHERE A.DATE_ID > ALL (SELECT SEC_DATE_ID FROM SECONDARY_LARGE_TABLE LIMIT 100) " +      
+                    "AND B.SEC_UNSIG_FLOAT_ID = ANY (SELECT sec_unsig_float_id FROM SECONDARY_LARGE_TABLE " +                                       
+                    "WHERE SEC_ID > ALL (SELECT MIN (ID) FROM GRAMMAR_TABLE WHERE UNSIG_ID IS NULL) AND " +
+                    "SEC_UNSIG_ID < ANY (SELECT DISTINCT(UNSIG_ID) FROM LARGE_TABLE WHERE UNSIG_ID<2500) LIMIT 1000) " +
+                    "AND A.ID < 10000";
+            ResultSet rs = conn.createStatement().executeQuery(ddl);
+            assertFalse(rs.next());  
+        } finally {
+            Statement statement = conn.createStatement();
+            String query = "drop table GRAMMAR_TABLE";
+            statement.executeUpdate(query);
+            query = "drop table LARGE_TABLE";
+            statement.executeUpdate(query);
+            query = "drop table SECONDARY_LARGE_TABLE";
+            statement.executeUpdate(query);
+            conn.close();
+        }
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/db90196d/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index 01e87e4..46ade33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -53,11 +53,11 @@ import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.KeyValueSchema;
+import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueBitSet;
-import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ResultUtil;
@@ -485,7 +485,7 @@ public class SortMergeJoinPlan implements QueryPlan {
             this.expressions = expressions;
             this.keys = Lists.newArrayListWithExpectedSize(expressions.size());
             for (int i = 0; i < expressions.size(); i++) {
-                this.keys.add(new ImmutableBytesWritable());
+                this.keys.add(new ImmutableBytesWritable(EMPTY_PTR));
             }
         }
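
The one-liner matters because HBase's ImmutableBytesWritable, when built with its no-arg constructor, throws IllegalStateException from get() until it is initialized, and with an empty subquery no row ever initializes the join keys. Seeding each key with EMPTY_PTR (an empty writable defined elsewhere in the class) avoids that. A sketch of the failure mode, using the plain HBase API rather than Phoenix code:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

    public class EmptyPtrSketch {
        public static void main(String[] args) {
            ImmutableBytesWritable uninitialized = new ImmutableBytesWritable();
            // uninitialized.get();  // throws IllegalStateException
            ImmutableBytesWritable seeded =
                new ImmutableBytesWritable(HConstants.EMPTY_BYTE_ARRAY);
            System.out.println(seeded.get().length); // 0 -- safe to compare
        }
    }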
         


[50/50] [abbrv] phoenix git commit: Fix merge errors

Posted by ma...@apache.org.
Fix merge errors


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/62d6720f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/62d6720f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/62d6720f

Branch: refs/heads/calcite
Commit: 62d6720f7f2f1bef8a2c99ca2a6aa3feb29a51df
Parents: f9ddb98 d1934af
Author: maryannxue <we...@intel.com>
Authored: Mon Jun 15 14:45:03 2015 -0400
Committer: maryannxue <we...@intel.com>
Committed: Mon Jun 15 14:45:03 2015 -0400

----------------------------------------------------------------------
 .gitignore                                      |    1 +
 NOTICE                                          |   10 +
 bin/daemon.py                                   |  989 ++++
 bin/end2endTest.py                              |    5 +-
 bin/log4j.properties                            |    9 +-
 bin/phoenix_utils.py                            |  139 +-
 bin/psql.py                                     |    8 +-
 bin/queryserver.py                              |  203 +
 bin/sqlline-thin.py                             |   89 +
 bin/sqlline.py                                  |    6 +-
 dev/jenkinsEnv.sh                               |   31 +
 dev/make_rc.sh                                  |    5 +
 dev/smart-apply-patch.sh                        |   96 +
 dev/test-patch.properties                       |   35 +
 dev/test-patch.sh                               | 1081 +++++
 phoenix-assembly/pom.xml                        |   52 +-
 phoenix-assembly/src/build/client.xml           |   37 +-
 .../src/build/components-minimal.xml            |    2 +
 .../components/all-common-dependencies.xml      |    1 +
 .../src/build/components/all-common-files.xml   |    6 +-
 .../src/build/components/all-common-jars.xml    |   30 +-
 phoenix-assembly/src/build/src.xml              |    4 +-
 phoenix-core/pom.xml                            |   23 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |    4 +-
 .../org/apache/phoenix/end2end/ArrayIT.java     |  125 +-
 .../phoenix/end2end/ArrayPrependFunctionIT.java |  652 +++
 .../phoenix/end2end/ArraysWithNullsIT.java      |  300 ++
 .../phoenix/end2end/DecodeFunctionIT.java       |    9 +-
 .../phoenix/end2end/EncodeFunctionIT.java       |    8 +-
 .../phoenix/end2end/EvaluationOfORIT.java       |   11 +
 .../org/apache/phoenix/end2end/HashJoinIT.java  |   54 +
 .../apache/phoenix/end2end/InstrFunctionIT.java |   12 +-
 .../phoenix/end2end/MappingTableDataTypeIT.java |   67 +-
 .../phoenix/end2end/PhoenixMetricsIT.java       |    4 -
 .../end2end/QueryDatabaseMetaDataIT.java        |    5 +
 .../org/apache/phoenix/end2end/QueryMoreIT.java |    6 +-
 .../end2end/SkipScanAfterManualSplitIT.java     |    2 +-
 .../apache/phoenix/end2end/SortMergeJoinIT.java |   54 +
 .../phoenix/end2end/SqrtFunctionEnd2EndIT.java  |  143 +
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |   32 +-
 .../org/apache/phoenix/end2end/SubqueryIT.java  |   18 +
 .../end2end/TenantSpecificTablesDDLIT.java      |    5 +
 .../phoenix/end2end/ToDateFunctionIT.java       |   46 +-
 .../phoenix/end2end/UserDefinedFunctionsIT.java |  656 +++
 .../end2end/index/DropIndexDuringUpsertIT.java  |    2 +-
 .../apache/phoenix/execute/PartialCommitIT.java |  317 ++
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |    4 +-
 .../iterate/RoundRobinResultIteratorIT.java     |  319 ++
 .../apache/phoenix/mapreduce/IndexToolIT.java   |    3 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |   76 +-
 .../regionserver/IndexHalfStoreFileReader.java  |   31 +-
 .../IndexHalfStoreFileReaderGenerator.java      |    9 +-
 .../regionserver/IndexSplitTransaction.java     |  104 +-
 .../hbase/regionserver/LocalIndexMerger.java    |   19 +-
 .../hbase/regionserver/LocalIndexSplitter.java  |   11 +-
 .../org/apache/phoenix/cache/GlobalCache.java   |   30 +-
 .../apache/phoenix/cache/ServerCacheClient.java |   10 +-
 .../cache/aggcache/SpillableGroupByCache.java   |   13 +-
 .../apache/phoenix/compile/ColumnResolver.java  |   17 +
 .../phoenix/compile/CreateFunctionCompiler.java |   80 +
 .../phoenix/compile/CreateIndexCompiler.java    |    2 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   15 +-
 .../phoenix/compile/ExpressionCompiler.java     |   33 +-
 .../apache/phoenix/compile/FromCompiler.java    |  199 +-
 .../apache/phoenix/compile/JoinCompiler.java    |    9 +-
 .../apache/phoenix/compile/PostDDLCompiler.java |   14 +
 .../phoenix/compile/ProjectionCompiler.java     |    2 +-
 .../apache/phoenix/compile/QueryCompiler.java   |   18 +-
 .../org/apache/phoenix/compile/QueryPlan.java   |   11 +
 .../apache/phoenix/compile/RowProjector.java    |   32 +-
 .../phoenix/compile/StatementNormalizer.java    |    5 +-
 .../phoenix/compile/SubqueryRewriter.java       |    6 +-
 .../phoenix/compile/SubselectRewriter.java      |    2 +-
 .../apache/phoenix/compile/TraceQueryPlan.java  |    5 +
 .../compile/TupleProjectionCompiler.java        |    4 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   13 +-
 .../phoenix/coprocessor/BaseRegionScanner.java  |   16 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   97 +-
 .../coprocessor/DelegateRegionScanner.java      |   23 +-
 .../GroupedAggregateRegionObserver.java         |   66 +-
 .../coprocessor/HashJoinRegionScanner.java      |   58 +-
 .../coprocessor/MetaDataEndpointImpl.java       |  713 ++-
 .../phoenix/coprocessor/MetaDataProtocol.java   |   39 +-
 .../coprocessor/MetaDataRegionObserver.java     |   23 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |   28 +-
 .../coprocessor/SequenceRegionObserver.java     |   16 +-
 .../UngroupedAggregateRegionObserver.java       |   84 +-
 .../coprocessor/generated/MetaDataProtos.java   | 4274 +++++++++++++++---
 .../coprocessor/generated/PFunctionProtos.java  | 2942 ++++++++++++
 .../phoenix/exception/SQLExceptionCode.java     |   20 +-
 .../phoenix/exception/SQLExceptionInfo.java     |   16 +
 .../apache/phoenix/execute/AggregatePlan.java   |    5 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |    6 +
 .../apache/phoenix/execute/CommitException.java |   35 +-
 .../phoenix/execute/DegenerateQueryPlan.java    |    5 +
 .../phoenix/execute/DelegateQueryPlan.java      |    6 +
 .../apache/phoenix/execute/HashJoinPlan.java    |    7 +-
 .../apache/phoenix/execute/MutationState.java   |  158 +-
 .../org/apache/phoenix/execute/ScanPlan.java    |   21 +-
 .../phoenix/execute/SortMergeJoinPlan.java      |    9 +-
 .../org/apache/phoenix/execute/UnionPlan.java   |    5 +
 .../phoenix/expression/ExpressionType.java      |   24 +-
 .../phoenix/expression/LikeExpression.java      |    8 +-
 .../function/ArrayAppendFunction.java           |   35 +-
 .../function/ArrayModifierFunction.java         |   75 +
 .../function/ArrayPrependFunction.java          |   96 +
 .../expression/function/InstrFunction.java      |    2 +-
 .../function/JavaMathOneArgumentFunction.java   |   77 +
 .../function/RegexpReplaceFunction.java         |   74 +-
 .../function/RegexpSplitFunction.java           |   59 +-
 .../function/RegexpSubstrFunction.java          |   97 +-
 .../expression/function/ScalarFunction.java     |    2 +-
 .../expression/function/SignFunction.java       |    5 +
 .../expression/function/SqrtFunction.java       |   49 +
 .../expression/function/UDFExpression.java      |  220 +
 .../util/regex/AbstractBasePattern.java         |    9 +-
 .../util/regex/AbstractBaseSplitter.java        |    2 +-
 .../expression/util/regex/GuavaSplitter.java    |    6 +-
 .../expression/util/regex/JONIPattern.java      |   40 +-
 .../expression/util/regex/JavaPattern.java      |   51 +-
 .../visitor/CloneExpressionVisitor.java         |    6 +
 .../phoenix/filter/RowKeyComparisonFilter.java  |    5 +-
 .../hbase/index/covered/data/LocalTable.java    |    7 +-
 .../index/covered/filter/FamilyOnlyFilter.java  |   80 -
 .../index/scanner/FilteredKeyValueScanner.java  |    2 +-
 .../write/ParallelWriterIndexCommitter.java     |    8 +-
 .../recovery/PerRegionIndexWriteCache.java      |   10 +-
 .../recovery/StoreFailuresInCachePolicy.java    |    4 +-
 .../TrackingParallelWriterIndexCommitter.java   |    8 +-
 .../apache/phoenix/index/IndexMaintainer.java   |   57 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |   10 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |   14 +-
 .../phoenix/iterate/BaseResultIterators.java    |    6 +-
 .../phoenix/iterate/ParallelIterators.java      |    7 +-
 .../iterate/RegionScannerResultIterator.java    |    9 +-
 .../iterate/RoundRobinResultIterator.java       |  329 ++
 .../apache/phoenix/iterate/SerialIterators.java |    3 +-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  114 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   30 +
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  |   46 +-
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java     |   33 +-
 .../phoenix/jdbc/PhoenixPreparedStatement.java  |    7 +-
 .../apache/phoenix/jdbc/PhoenixResultSet.java   |    8 +-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  140 +-
 .../org/apache/phoenix/join/HashJoinInfo.java   |   17 +-
 .../phoenix/mapreduce/CsvBulkLoadTool.java      |   95 +-
 .../phoenix/mapreduce/CsvToKeyValueMapper.java  |   26 +-
 .../phoenix/mapreduce/PhoenixRecordReader.java  |    9 +-
 .../phoenix/mapreduce/util/ConnectionUtil.java  |   23 +-
 .../apache/phoenix/optimize/QueryOptimizer.java |    4 +-
 .../phoenix/parse/CreateFunctionStatement.java  |   42 +
 .../phoenix/parse/CreateIndexStatement.java     |    8 +-
 .../org/apache/phoenix/parse/DMLStatement.java  |   11 +-
 .../apache/phoenix/parse/DeleteStatement.java   |    5 +-
 .../phoenix/parse/DropFunctionStatement.java    |   41 +
 .../apache/phoenix/parse/FunctionParseNode.java |   76 +-
 .../parse/IndexExpressionParseNodeRewriter.java |    4 +-
 .../org/apache/phoenix/parse/NamedNode.java     |    2 +-
 .../org/apache/phoenix/parse/PFunction.java     |  252 ++
 .../apache/phoenix/parse/ParseNodeFactory.java  |   70 +-
 .../apache/phoenix/parse/ParseNodeRewriter.java |    2 +-
 .../apache/phoenix/parse/SelectStatement.java   |   24 +-
 .../org/apache/phoenix/parse/UDFParseNode.java  |   27 +
 .../apache/phoenix/parse/UpsertStatement.java   |    9 +-
 .../apache/phoenix/protobuf/ProtobufUtil.java   |   14 +-
 .../phoenix/query/ConnectionQueryServices.java  |    4 +
 .../query/ConnectionQueryServicesImpl.java      |  179 +-
 .../query/ConnectionlessQueryServicesImpl.java  |   52 +-
 .../query/DelegateConnectionQueryServices.java  |   31 +
 .../apache/phoenix/query/MetaDataMutated.java   |    3 +
 .../apache/phoenix/query/QueryConstants.java    |   36 +
 .../org/apache/phoenix/query/QueryServices.java |    8 +
 .../phoenix/query/QueryServicesOptions.java     |   23 +-
 .../schema/FunctionAlreadyExistsException.java  |   58 +
 .../schema/FunctionNotFoundException.java       |   52 +
 .../apache/phoenix/schema/MetaDataClient.java   |  258 +-
 .../NewerFunctionAlreadyExistsException.java    |   39 +
 .../org/apache/phoenix/schema/PColumnImpl.java  |    8 +-
 .../org/apache/phoenix/schema/PMetaData.java    |    6 +-
 .../apache/phoenix/schema/PMetaDataEntity.java  |   22 +
 .../apache/phoenix/schema/PMetaDataImpl.java    |  118 +-
 .../java/org/apache/phoenix/schema/PTable.java  |    3 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |   44 +-
 .../phoenix/schema/stats/GuidePostsInfo.java    |    9 +-
 .../schema/stats/StatisticsCollector.java       |   47 +-
 .../phoenix/schema/stats/StatisticsScanner.java |   26 +-
 .../phoenix/schema/stats/StatisticsUtil.java    |    2 +
 .../phoenix/schema/stats/StatisticsWriter.java  |   57 +-
 .../phoenix/schema/types/PArrayDataType.java    |  161 +-
 .../apache/phoenix/schema/types/PBinary.java    |    2 +-
 .../apache/phoenix/schema/types/PBoolean.java   |   18 +-
 .../phoenix/schema/types/PBooleanArray.java     |   31 +-
 .../org/apache/phoenix/schema/types/PChar.java  |    5 +-
 .../apache/phoenix/schema/types/PDataType.java  |   15 +-
 .../org/apache/phoenix/schema/types/PDate.java  |   21 +-
 .../apache/phoenix/schema/types/PDecimal.java   |    3 +
 .../apache/phoenix/schema/types/PTimestamp.java |   22 +-
 .../apache/phoenix/schema/types/PVarchar.java   |    2 +-
 .../phoenix/schema/types/PhoenixArray.java      |  127 +-
 .../java/org/apache/phoenix/util/IndexUtil.java |   38 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |    7 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java |   13 +-
 .../org/apache/phoenix/util/PropertiesUtil.java |   22 +
 .../java/org/apache/phoenix/util/QueryUtil.java |   47 +-
 .../org/apache/phoenix/util/ReadOnlyProps.java  |   37 +
 .../java/org/apache/phoenix/util/ScanUtil.java  |   22 +
 .../org/apache/phoenix/util/SchemaUtil.java     |   17 +-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |    6 +-
 .../phoenix/compile/QueryCompilerTest.java      |   62 +-
 .../phoenix/execute/MutationStateTest.java      |   64 +
 .../expression/ArrayAppendFunctionTest.java     |   29 +-
 .../expression/ArrayPrependFunctionTest.java    |  552 +++
 .../phoenix/expression/SignFunctionTest.java    |    3 +-
 .../phoenix/expression/SqrtFunctionTest.java    |  150 +
 .../expression/function/InstrFunctionTest.java  |   48 +-
 .../util/regex/PatternPerformanceTest.java      |   20 +-
 .../phoenix/filter/SkipScanBigFilterTest.java   |    2 +-
 .../index/covered/TestLocalTableState.java      |    9 +-
 .../covered/filter/TestFamilyOnlyFilter.java    |  106 -
 .../index/write/TestWALRecoveryCaching.java     |   21 +-
 .../recovery/TestPerRegionIndexWriteCache.java  |   26 +-
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java |   34 +-
 .../phoenix/mapreduce/CsvBulkLoadToolTest.java  |   11 -
 .../mapreduce/CsvToKeyValueMapperTest.java      |   15 -
 .../apache/phoenix/parse/QueryParserTest.java   |   18 -
 .../java/org/apache/phoenix/query/BaseTest.java |    6 +-
 .../query/ParallelIteratorsSplitTest.java       |   20 +
 .../phoenix/query/QueryServicesTestImpl.java    |    5 +-
 .../org/apache/phoenix/schema/PCharPadTest.java |  147 +
 .../phoenix/schema/PDataTypeForArraysTest.java  | 1141 -----
 .../types/BasePhoenixArrayToStringTest.java     |   84 +
 ...PrimitiveDoublePhoenixArrayToStringTest.java |   39 +
 ...asePrimitiveIntPhoenixArrayToStringTest.java |   50 +
 .../schema/types/PDataTypeForArraysTest.java    | 1141 +++++
 .../schema/types/PDateArrayToStringTest.java    |   80 +
 .../schema/types/PVarcharArrayToStringTest.java |   71 +
 ...rimitiveBooleanPhoenixArrayToStringTest.java |   56 +
 .../PrimitiveBytePhoenixArrayToStringTest.java  |   34 +
 ...PrimitiveDoublePhoenixArrayToStringTest.java |   36 +
 .../PrimitiveFloatPhoenixArrayToStringTest.java |   36 +
 .../PrimitiveIntPhoenixArrayToStringTest.java   |   27 +
 .../PrimitiveLongPhoenixArrayToStringTest.java  |   35 +
 .../PrimitiveShortPhoenixArrayToStringTest.java |   36 +
 .../apache/phoenix/util/PhoenixRuntimeTest.java |   75 +
 .../apache/phoenix/util/PropertiesUtilTest.java |   23 +-
 .../org/apache/phoenix/util/QueryUtilTest.java  |   33 +-
 phoenix-flume/pom.xml                           |   15 +-
 phoenix-pherf/cluster/pherf.sh                  |    2 +-
 phoenix-pherf/config/pherf.properties           |    3 +
 phoenix-pherf/pom.xml                           |  146 +-
 .../org/apache/phoenix/pherf/DataIngestIT.java  |   87 +
 .../apache/phoenix/pherf/ResultBaseTestIT.java  |   45 +
 .../apache/phoenix/pherf/SchemaReaderIT.java    |   77 +
 .../java/org/apache/phoenix/pherf/Pherf.java    |    8 +-
 .../apache/phoenix/pherf/PherfConstants.java    |   50 +-
 .../phoenix/pherf/configuration/DataModel.java  |   11 +-
 .../pherf/configuration/DataOverride.java       |    3 +-
 .../pherf/configuration/DataTypeMapping.java    |    5 +-
 .../phoenix/pherf/configuration/QuerySet.java   |   14 +-
 .../phoenix/pherf/configuration/Scenario.java   |   13 +
 .../pherf/configuration/XMLConfigParser.java    |    1 +
 .../pherf/exception/FileLoaderException.java    |    4 +-
 .../exception/FileLoaderRuntimeException.java   |    1 +
 .../phoenix/pherf/jmx/MonitorManager.java       |    6 +-
 .../pherf/jmx/monitors/ExampleMonitor.java      |   33 -
 .../phoenix/pherf/loaddata/DataLoader.java      |   51 +-
 .../pherf/result/DataLoadThreadTime.java        |   19 +-
 .../pherf/result/DataLoadTimeSummary.java       |   17 -
 .../phoenix/pherf/result/DataModelResult.java   |    1 +
 .../phoenix/pherf/result/QueryResult.java       |    2 +
 .../phoenix/pherf/result/QuerySetResult.java    |    3 +-
 .../org/apache/phoenix/pherf/result/Result.java |   10 +-
 .../apache/phoenix/pherf/result/ResultUtil.java |   75 +-
 .../apache/phoenix/pherf/result/RunTime.java    |   23 +-
 .../phoenix/pherf/result/ScenarioResult.java    |    1 +
 .../apache/phoenix/pherf/result/ThreadTime.java |    1 +
 .../pherf/result/impl/CSVResultHandler.java     |   26 +-
 .../pherf/result/impl/ImageResultHandler.java   |    5 +-
 .../pherf/result/impl/XMLResultHandler.java     |    6 +-
 .../phoenix/pherf/schema/SchemaReader.java      |    7 +-
 .../apache/phoenix/pherf/util/PhoenixUtil.java  |   52 +-
 .../apache/phoenix/pherf/util/ResourceList.java |   47 +-
 .../phoenix/pherf/workload/QueryExecutor.java   |    2 -
 .../pherf/workload/WorkloadExecutor.java        |    6 +-
 .../datamodel/create_prod_test_unsalted.sql     |    2 +-
 .../scenario/prod_test_unsalted_scenario.xml    |    4 +-
 .../phoenix/pherf/BaseTestWithCluster.java      |   67 -
 .../phoenix/pherf/ConfigurationParserTest.java  |   12 +-
 .../apache/phoenix/pherf/DataIngestTest.java    |   78 -
 .../apache/phoenix/pherf/DataLoaderTest.java    |  108 -
 .../org/apache/phoenix/pherf/PherfTest.java     |   26 +-
 .../org/apache/phoenix/pherf/ResourceTest.java  |    8 +-
 .../apache/phoenix/pherf/ResultBaseTest.java    |   44 +
 .../org/apache/phoenix/pherf/ResultTest.java    |   21 +-
 .../apache/phoenix/pherf/RowCalculatorTest.java |   88 +
 .../apache/phoenix/pherf/RuleGeneratorTest.java |    7 +-
 .../apache/phoenix/pherf/SchemaReaderTest.java  |   73 -
 .../apache/phoenix/pherf/TestHBaseProps.java    |    1 -
 .../test/resources/datamodel/test_schema.sql    |   33 +-
 .../test/resources/scenario/test_scenario.xml   |    4 +-
 phoenix-pherf/standalone/pherf.sh               |    2 +-
 phoenix-pig/pom.xml                             |   37 +-
 phoenix-protocol/src/main/MetaDataService.proto |   37 +-
 phoenix-protocol/src/main/PFunction.proto       |   45 +
 phoenix-server-client/pom.xml                   |   66 +
 phoenix-server-client/src/build/thin-client.xml |   41 +
 .../phoenix/queryserver/client/Driver.java      |   49 +
 .../queryserver/client/ThinClientUtil.java      |   35 +
 .../resources/META-INF/services/java.sql.Driver |    1 +
 .../org-apache-phoenix-remote-jdbc.properties   |   25 +
 phoenix-server/pom.xml                          |  111 +
 .../src/build/query-server-runnable.xml         |   48 +
 .../phoenix/end2end/QueryServerBasicsIT.java    |  157 +
 .../phoenix/end2end/QueryServerThread.java      |   45 +
 .../src/it/resources/log4j.properties           |   63 +
 .../apache/phoenix/queryserver/server/Main.java |  229 +
 .../queryserver/server/PhoenixMetaFactory.java  |   28 +
 .../server/PhoenixMetaFactoryImpl.java          |   76 +
 .../apache/phoenix/DriverCohabitationTest.java  |   65 +
 phoenix-spark/pom.xml                           |   10 +-
 .../apache/phoenix/spark/PhoenixSparkIT.scala   |   22 +
 .../apache/phoenix/spark/DefaultSource.scala    |   17 +
 .../org/apache/phoenix/spark/PhoenixRDD.scala   |    2 +-
 .../phoenix/spark/PhoenixRecordWritable.scala   |   25 +-
 .../apache/phoenix/spark/PhoenixRelation.scala  |   17 +
 pom.xml                                         |  118 +-
 src/main/config/checkstyle/checker.xml          |  281 ++
 src/main/config/checkstyle/header.txt           |   16 +
 src/main/config/checkstyle/suppressions.xml     |   46 +
 329 files changed, 22750 insertions(+), 4261 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 180c895,d613688..989426c
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@@ -142,14 -143,13 +143,14 @@@ public class GroupedAggregateRegionObse
              }
              ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
              innerScanner =
-                     getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, 
+                     getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector,
                              dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
-         } 
+         }
  
          if (j != null) {
 +            TupleProjector postJoinProjector = TupleProjector.deserializeProjectorFromScan(scan, false);
              innerScanner =
 -                    new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan),
 +                    new HashJoinRegionScanner(innerScanner, p, postJoinProjector, j, ScanUtil.getTenantId(scan),
                              c.getEnvironment());
          }
  

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index f969ce9,1e34d96..6fdc865
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@@ -217,19 -217,16 +219,20 @@@ public class HashJoinRegionScanner impl
      private boolean shouldAdvance() {
          if (!resultQueue.isEmpty())
              return false;
-         
+ 
          return hasMore;
      }
-     
+ 
      private boolean nextInQueue(List<Cell> results) {
-         if (resultQueue.isEmpty())
+         if (resultQueue.isEmpty()) {
              return false;
-         
+         }
+ 
          Tuple tuple = resultQueue.poll();
 +        // post-join projection
 +        if (postJoinProjector != null) {
 +            tuple = postJoinProjector.projectResults(tuple);
 +        }
          for (int i = 0; i < tuple.size(); i++) {
              results.add(tuple.getValue(i));
          }
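
The merged change threads an optional post-join TupleProjector through
HashJoinRegionScanner so that projection is applied to each tuple as it is
dequeued, after the join has produced it. A minimal standalone sketch of that
pattern, using hypothetical types (java.util.function.UnaryOperator stands in
for the Phoenix TupleProjector; this is not the Phoenix class itself):

    import java.util.ArrayDeque;
    import java.util.Queue;
    import java.util.function.UnaryOperator;

    // Sketch: drain joined rows from a queue, applying an optional projector
    // to each row on the way out, mirroring the nextInQueue() change above.
    final class PostJoinDrainer<T> {
        private final Queue<T> resultQueue = new ArrayDeque<>();
        private final UnaryOperator<T> postJoinProjector; // null means "no projection"

        PostJoinDrainer(UnaryOperator<T> postJoinProjector) {
            this.postJoinProjector = postJoinProjector;
        }

        void enqueue(T row) { resultQueue.add(row); }

        T next() {
            T row = resultQueue.poll();
            if (row != null && postJoinProjector != null) {
                row = postJoinProjector.apply(row); // project only rows actually returned
            }
            return row;
        }
    }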

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 630e471,d5cc486..fa78709
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@@ -226,15 -227,14 +227,15 @@@ public class UngroupedAggregateRegionOb
              }
              ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
              theScanner =
-                     getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, 
+                     getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
                              dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
-         } 
-         
+         }
+ 
          if (j != null)  {
 -            theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
 +            TupleProjector postJoinProjector = TupleProjector.deserializeProjectorFromScan(scan, false);
 +            theScanner = new HashJoinRegionScanner(theScanner, p, postJoinProjector, j, ScanUtil.getTenantId(scan), c.getEnvironment());
          }
-         
+ 
          int batchSize = 0;
          List<Mutation> mutations = Collections.emptyList();
          boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
index 87b944b,362e98d..efbaaa6
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
@@@ -36,13 -38,6 +38,13 @@@ import org.apache.phoenix.parse.Functio
   * @since 0.1
   */
  public class SelectStatement implements FilterableStatement {
 +    public static final SelectStatement SELECT_STAR =
 +            new SelectStatement(
 +                    null, null, false, 
 +                    Collections.<AliasedNode>singletonList(new AliasedNode(null, WildcardParseNode.INSTANCE)),
 +                    null, Collections.<ParseNode>emptyList(),
 +                    null, Collections.<OrderByNode>emptyList(),
-                     null, 0, false, false, Collections.<SelectStatement>emptyList());
++                    null, 0, false, false, Collections.<SelectStatement>emptyList(), new HashMap<String, UDFParseNode>(1));
      public static final SelectStatement SELECT_ONE =
              new SelectStatement(
                      null, null, false, 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62d6720f/pom.xml
----------------------------------------------------------------------


[12/50] [abbrv] phoenix git commit: PHOENIX-1958 Minimize memory allocation on new connection

Posted by ma...@apache.org.
PHOENIX-1958 Minimize memory allocation on new connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/93397aff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/93397aff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/93397aff

Branch: refs/heads/calcite
Commit: 93397affd75fb5877146ca7b4bb028db301f671e
Parents: cd81738
Author: James Taylor <jt...@salesforce.com>
Authored: Sat May 9 18:13:49 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat May 9 18:13:49 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/util/ReadOnlyProps.java    | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/93397aff/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index 47137ef..a6fb7a5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -27,6 +27,9 @@ import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.google.common.base.Objects;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
@@ -40,6 +43,7 @@ import com.google.common.collect.Maps;
  * @since 1.2.2
  */
 public class ReadOnlyProps implements Iterable<Entry<String, String>> {
+    private static final Logger logger = LoggerFactory.getLogger(ReadOnlyProps.class);
     public static final ReadOnlyProps EMPTY_PROPS = new ReadOnlyProps();
     private final Map<String, String> props;
     
@@ -296,6 +300,7 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
             String value = entry.getValue().toString();
             String oldValue = props.get(key);
             if (!Objects.equal(oldValue, value)) {
+                if (logger.isDebugEnabled()) logger.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
                 return new ReadOnlyProps(this, overrides);
             }
         }
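
The point of the change above is that addAll (and therefore each new
connection) only allocates a fresh ReadOnlyProps when an override actually
differs from a current value, and the debug message is only concatenated when
debug logging is enabled. A minimal sketch of the copy-on-divergence idea,
with hypothetical names (not the Phoenix class itself):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;

    // Sketch: reuse the existing immutable map unless an override changes a value.
    static Map<String, String> withOverrides(Map<String, String> current,
                                             Map<String, String> overrides) {
        for (Map.Entry<String, String> e : overrides.entrySet()) {
            if (!Objects.equals(current.get(e.getKey()), e.getValue())) {
                Map<String, String> copy = new HashMap<>(current);
                copy.putAll(overrides);
                return Collections.unmodifiableMap(copy); // diverged: allocate a copy
            }
        }
        return current; // no effective change: no allocation
    }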


[30/50] [abbrv] phoenix git commit: PHOENIX-2005 Connection utilities omit zk client port, parent znode

Posted by ma...@apache.org.
PHOENIX-2005 Connection utilities omit zk client port, parent znode


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/afb0120e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/afb0120e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/afb0120e

Branch: refs/heads/calcite
Commit: afb0120e079502d926c5f37de4e28d3865e29089
Parents: a28c1d3
Author: Nick Dimiduk <nd...@apache.org>
Authored: Tue May 26 11:11:48 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Tue May 26 11:12:28 2015 -0700

----------------------------------------------------------------------
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java     | 28 ++++--
 .../phoenix/mapreduce/CsvBulkLoadTool.java      | 93 ++++++++++----------
 .../phoenix/mapreduce/CsvToKeyValueMapper.java  | 26 +-----
 .../query/ConnectionQueryServicesImpl.java      |  4 +-
 .../java/org/apache/phoenix/util/QueryUtil.java | 45 ++++++++--
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java | 14 ++-
 .../phoenix/mapreduce/CsvBulkLoadToolTest.java  | 11 ---
 .../mapreduce/CsvToKeyValueMapperTest.java      | 15 ----
 .../org/apache/phoenix/util/QueryUtilTest.java  | 33 ++++---
 9 files changed, 139 insertions(+), 130 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 9e95667..2451603 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -31,6 +31,7 @@ import java.util.logging.Logger;
 
 import javax.annotation.concurrent.Immutable;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -174,10 +175,10 @@ public abstract class PhoenixEmbeddedDriver implements Driver, org.apache.phoeni
     }
     
     /**
-     * 
+     *
      * Class to encapsulate connection info for HBase
      *
-     * 
+     *
      * @since 0.1.1
      */
     public static class ConnectionInfo {
@@ -204,12 +205,18 @@ public abstract class PhoenixEmbeddedDriver implements Driver, org.apache.phoeni
             return false;
         }
         
-        protected static ConnectionInfo create(String url) throws SQLException {
-            StringTokenizer tokenizer = new StringTokenizer(url == null ? "" : url.substring(PhoenixRuntime.JDBC_PROTOCOL.length()),DELIMITERS, true);
+        public static ConnectionInfo create(String url) throws SQLException {
+            url = url == null ? "" : url;
+            url = url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)
+                    ? url.substring(PhoenixRuntime.JDBC_PROTOCOL.length())
+                    : url;
+            StringTokenizer tokenizer = new StringTokenizer(url, DELIMITERS, true);
             int nTokens = 0;
             String[] tokens = new String[5];
             String token = null;
-            while (tokenizer.hasMoreTokens() && !(token=tokenizer.nextToken()).equals(TERMINATOR) && tokenizer.hasMoreTokens() && nTokens < tokens.length) {
+            while (tokenizer.hasMoreTokens() &&
+                    !(token=tokenizer.nextToken()).equals(TERMINATOR) &&
+                    tokenizer.hasMoreTokens() && nTokens < tokens.length) {
                 token = tokenizer.nextToken();
                 // This would mean we have an empty string for a token which is illegal
                 if (DELIMITERS.contains(token)) {
@@ -316,8 +323,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, org.apache.phoeni
         private final String principal;
         private final String keytab;
         
-        // used for testing
-        ConnectionInfo(String zookeeperQuorum, Integer port, String rootNode, String principal, String keytab) {
+        public ConnectionInfo(String zookeeperQuorum, Integer port, String rootNode, String principal, String keytab) {
             this.zookeeperQuorum = zookeeperQuorum;
             this.port = port;
             this.rootNode = rootNode;
@@ -326,8 +332,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, org.apache.phoeni
             this.keytab = keytab;
         }
         
-        // used for testing
-        ConnectionInfo(String zookeeperQuorum, Integer port, String rootNode) {
+        public ConnectionInfo(String zookeeperQuorum, Integer port, String rootNode) {
         	this(zookeeperQuorum, port, rootNode, null, null);
         }
 
@@ -417,6 +422,11 @@ public abstract class PhoenixEmbeddedDriver implements Driver, org.apache.phoeni
 					+ (principal == null ? "" : ":" + principal)
 					+ (keytab == null ? "" : ":" + keytab);
 		}
+
+        public String toUrl() {
+            return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR
+                    + toString();
+        }
     }
 
     public static boolean isTestUrl(String url) {
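
With create(String) now public, the constructors opened up, and the new
toUrl(), callers can round-trip connection details through ConnectionInfo
instead of concatenating URL strings by hand. A usage sketch based on the
signatures above (quorum, port, and znode values are hypothetical; create()
declares SQLException):

    // Parse a URL; the jdbc:phoenix: prefix is now stripped if present.
    ConnectionInfo parsed = ConnectionInfo.create("jdbc:phoenix:zk1,zk2,zk3:2181:/hbase");

    // Or build one directly from its parts via the now-public constructor.
    ConnectionInfo built = new ConnectionInfo("zk1,zk2,zk3", 2181, "/hbase");

    // toUrl() re-attaches the protocol prefix to the quorum:port:znode form.
    String url = built.toUrl(); // "jdbc:phoenix:zk1,zk2,zk3:2181:/hbase"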

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
index a5a8aa1..9e27bac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
@@ -18,11 +18,11 @@
 package org.apache.phoenix.mapreduce;
 
 import java.sql.Connection;
-import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -41,7 +41,6 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -56,8 +55,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.job.JobManager;
-import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -67,6 +66,7 @@ import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.slf4j.Logger;
@@ -84,7 +84,7 @@ public class CsvBulkLoadTool extends Configured implements Tool {
 
     private static final Logger LOG = LoggerFactory.getLogger(CsvBulkLoadTool.class);
 
-    static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Zookeeper quorum to connect to (optional)");
+    static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)");
     static final Option INPUT_PATH_OPT = new Option("i", "input", true, "Input CSV path (mandatory)");
     static final Option OUTPUT_PATH_OPT = new Option("o", "output", true, "Output path for temporary HFiles (optional)");
     static final Option SCHEMA_NAME_OPT = new Option("s", "schema", true, "Phoenix schema name (optional)");
@@ -184,35 +184,48 @@ public class CsvBulkLoadTool extends Configured implements Tool {
         } catch (IllegalStateException e) {
             printHelpAndExit(e.getMessage(), getOptions());
         }
-        Class.forName(DriverManager.class.getName());
-        Connection conn = DriverManager.getConnection(
-                getJdbcUrl(cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt())));
-        
-        return loadData(conf, cmdLine, conn);
+        return loadData(conf, cmdLine);
     }
 
-	private int loadData(Configuration conf, CommandLine cmdLine,
-			Connection conn) throws SQLException, InterruptedException,
-			ExecutionException {
-		    String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt());
+	private int loadData(Configuration conf, CommandLine cmdLine) throws SQLException,
+            InterruptedException, ExecutionException, ClassNotFoundException {
+        String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt());
         String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPT.getOpt());
         String indexTableName = cmdLine.getOptionValue(INDEX_TABLE_NAME_OPT.getOpt());
         String qualifiedTableName = getQualifiedTableName(schemaName, tableName);
-        String qualifedIndexTableName = null;
-        if(indexTableName != null){
-        	qualifedIndexTableName = getQualifiedTableName(schemaName, indexTableName);
+        String qualifiedIndexTableName = null;
+        if (indexTableName != null){
+        	qualifiedIndexTableName = getQualifiedTableName(schemaName, indexTableName);
+        }
+
+        if (cmdLine.hasOption(ZK_QUORUM_OPT.getOpt())) {
+            // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job.
+            String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt());
+            PhoenixDriver.ConnectionInfo info = PhoenixDriver.ConnectionInfo.create(zkQuorum);
+            LOG.info("Configuring HBase connection to {}", info);
+            for (Map.Entry<String,String> entry : info.asProps()) {
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("Setting {} = {}", entry.getKey(), entry.getValue());
+                }
+                conf.set(entry.getKey(), entry.getValue());
+            }
+        }
+
+        final Connection conn = QueryUtil.getConnection(conf);
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(),
+                    qualifiedTableName);
         }
         List<ColumnInfo> importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName);
         configureOptions(cmdLine, importColumns, conf);
-
         try {
             validateTable(conn, schemaName, tableName);
         } finally {
             conn.close();
         }
 
-        Path inputPath = new Path(cmdLine.getOptionValue(INPUT_PATH_OPT.getOpt()));
-        Path outputPath = null;
+        final Path inputPath = new Path(cmdLine.getOptionValue(INPUT_PATH_OPT.getOpt()));
+        final Path outputPath;
         if (cmdLine.hasOption(OUTPUT_PATH_OPT.getOpt())) {
             outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPT.getOpt()));
         } else {
@@ -221,20 +234,21 @@ public class CsvBulkLoadTool extends Configured implements Tool {
         
         List<TargetTableRef> tablesToBeLoaded = new ArrayList<TargetTableRef>();
         tablesToBeLoaded.add(new TargetTableRef(qualifiedTableName));
+        // using conn after it's been closed... o.O
         tablesToBeLoaded.addAll(getIndexTables(conn, schemaName, qualifiedTableName));
         
         // When loading a single index table, check index table name is correct
-        if(qualifedIndexTableName != null){
+        if (qualifiedIndexTableName != null){
             TargetTableRef targetIndexRef = null;
         	for (TargetTableRef tmpTable : tablesToBeLoaded){
-        		if(tmpTable.getLogicalName().compareToIgnoreCase(qualifedIndexTableName) == 0) {
+        		if (tmpTable.getLogicalName().compareToIgnoreCase(qualifiedIndexTableName) == 0) {
                     targetIndexRef = tmpTable;
         			break;
         		}
         	}
-        	if(targetIndexRef == null){
+        	if (targetIndexRef == null){
                 throw new IllegalStateException("CSV Bulk Loader error: index table " +
-                    qualifedIndexTableName + " doesn't exist");
+                    qualifiedIndexTableName + " doesn't exist");
         	}
         	tablesToBeLoaded.clear();
         	tablesToBeLoaded.add(targetIndexRef);
@@ -247,13 +261,14 @@ public class CsvBulkLoadTool extends Configured implements Tool {
                 .getProps()
                 .getBoolean(QueryServices.METRICS_ENABLED,
                         QueryServicesOptions.DEFAULT_IS_METRICS_ENABLED);
-        ExecutorService executor =  JobManager.createThreadPoolExec(Integer.MAX_VALUE, 5, 20, useInstrumentedPool);
+        ExecutorService executor =
+                JobManager.createThreadPoolExec(Integer.MAX_VALUE, 5, 20, useInstrumentedPool);
         try{
 	        for (TargetTableRef table : tablesToBeLoaded) {
 	        	Path tablePath = new Path(outputPath, table.getPhysicalName());
 	        	Configuration jobConf = new Configuration(conf);
 	        	jobConf.set(CsvToKeyValueMapper.TABLE_NAME_CONFKEY, qualifiedTableName);
-	        	if(qualifiedTableName.compareToIgnoreCase(table.getLogicalName()) != 0) {
+	        	if (qualifiedTableName.compareToIgnoreCase(table.getLogicalName()) != 0) {
                     jobConf.set(CsvToKeyValueMapper.INDEX_TABLE_NAME_CONFKEY, table.getPhysicalName());
 	        	}
 	        	TableLoader tableLoader = new TableLoader(
@@ -274,14 +289,6 @@ public class CsvBulkLoadTool extends Configured implements Tool {
 		return retCode;
 	}
 
-    String getJdbcUrl(String zkQuorum) {
-        if (zkQuorum == null) {
-            LOG.warn("Defaulting to localhost for ZooKeeper quorum");
-            zkQuorum = "localhost:2181";
-        }
-        return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
-    }
-
     /**
      * Build up the list of columns to be imported. The list is taken from the command line if
      * present, otherwise it is taken from the table description.
@@ -327,9 +334,11 @@ public class CsvBulkLoadTool extends Configured implements Tool {
      * @param importColumns descriptors of columns to be imported
      * @param conf job configuration
      */
-    @VisibleForTesting
-    static void configureOptions(CommandLine cmdLine, List<ColumnInfo> importColumns,
-            Configuration conf) {
+    private static void configureOptions(CommandLine cmdLine, List<ColumnInfo> importColumns,
+            Configuration conf) throws SQLException {
+
+        // we don't parse ZK_QUORUM_OPT here because we need it in order to
+        // create the connection we need to build importColumns.
 
         char delimiterChar = ',';
         if (cmdLine.hasOption(DELIMITER_OPT.getOpt())) {
@@ -358,12 +367,6 @@ public class CsvBulkLoadTool extends Configured implements Tool {
             escapeChar = escapeString.charAt(0);
         }
 
-        if (cmdLine.hasOption(ZK_QUORUM_OPT.getOpt())) {
-            String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt());
-            LOG.info("Configuring ZK quorum to {}", zkQuorum);
-            conf.set(HConstants.ZOOKEEPER_QUORUM, zkQuorum);
-        }
-
         CsvBulkImportUtil.initCsvImportJob(
                 conf,
                 getQualifiedTableName(
@@ -493,7 +496,7 @@ public class CsvBulkLoadTool extends Configured implements Tool {
 	            job.setMapOutputKeyClass(ImmutableBytesWritable.class);
 	            job.setMapOutputValueClass(KeyValue.class);
 
-	            // initialize credentials to possibily run in a secure env
+	            // initialize credentials to possibly run in a secure env
 	            TableMapReduceUtil.initCredentials(job);
 
                 HTable htable = new HTable(conf, tableName);
@@ -522,8 +525,8 @@ public class CsvBulkLoadTool extends Configured implements Tool {
 	            }
 	            
 	            return true;
-            } catch(Exception ex) {
-            	LOG.error("Import job on table=" + tableName + " failed due to exception:" + ex);
+            } catch (Exception ex) {
+            	LOG.error("Import job on table=" + tableName + " failed due to exception.", ex);
             	return false;
             }
         }
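
The net effect of the loadData rewrite: -z now carries full connection details
(quorum, optionally port and parent znode), parsed once into a ConnectionInfo
and copied into the job Configuration before QueryUtil.getConnection(conf)
derives the JDBC URL from that same Configuration. A condensed sketch of the
propagation step, assuming conf is the Hadoop Configuration in scope (the
quorum string is hypothetical):

    PhoenixEmbeddedDriver.ConnectionInfo info =
            PhoenixEmbeddedDriver.ConnectionInfo.create("zk1,zk2,zk3:2181:/hbase");
    for (Map.Entry<String, String> entry : info.asProps()) {
        conf.set(entry.getKey(), entry.getValue()); // quorum, port, znode parent
    }
    Connection conn = QueryUtil.getConnection(conf); // URL built from conf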

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
index 90cb854..c0328bd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.mapreduce;
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Iterator;
 import java.util.List;
@@ -32,7 +31,6 @@ import org.apache.commons.csv.CSVFormat;
 import org.apache.commons.csv.CSVParser;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -41,11 +39,11 @@ import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.util.CSVCommonsLoader;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.csv.CsvUpsertExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -108,7 +106,6 @@ public class CsvToKeyValueMapper extends Mapper<LongWritable,Text,ImmutableBytes
     protected void setup(Context context) throws IOException, InterruptedException {
 
         Configuration conf = context.getConfiguration();
-        String jdbcUrl = getJdbcUrl(conf);
 
         // pass client configuration into driver
         Properties clientInfos = new Properties();
@@ -118,12 +115,9 @@ public class CsvToKeyValueMapper extends Mapper<LongWritable,Text,ImmutableBytes
             clientInfos.setProperty(entry.getKey(), entry.getValue());
         }
         
-        // This statement also ensures that the driver class is loaded
-        LOG.info("Connection with driver {} with url {}", PhoenixDriver.class.getName(), jdbcUrl);
-
         try {
-            conn = (PhoenixConnection) DriverManager.getConnection(jdbcUrl, clientInfos);
-        } catch (SQLException e) {
+            conn = (PhoenixConnection) QueryUtil.getConnection(clientInfos, conf);
+        } catch (SQLException | ClassNotFoundException e) {
             throw new RuntimeException(e);
         }
 
@@ -189,20 +183,6 @@ public class CsvToKeyValueMapper extends Mapper<LongWritable,Text,ImmutableBytes
         }
     }
 
-    /**
-     * Build up the JDBC URL for connecting to Phoenix.
-     *
-     * @return the full JDBC URL for a Phoenix connection
-     */
-    @VisibleForTesting
-    static String getJdbcUrl(Configuration conf) {
-        String zkQuorum = conf.get(HConstants.ZOOKEEPER_QUORUM);
-        if (zkQuorum == null) {
-            throw new IllegalStateException(HConstants.ZOOKEEPER_QUORUM + " is not configured");
-        }
-        return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
-    }
-
     @VisibleForTesting
     CsvUpsertExecutor buildUpsertExecutor(Configuration conf) {
         String tableName = conf.get(TABLE_NAME_CONFKEY);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c86ea48..d6d5df9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -807,7 +807,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         boolean isMetaTable = SchemaUtil.isMetaTable(tableName);
         boolean tableExist = true;
         try {
-            logger.info("Found quorum: " + ZKConfig.getZKQuorumServersString(config));
+            final String quorum = ZKConfig.getZKQuorumServersString(config);
+            final String znode = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
+            logger.debug("Found quorum: " + quorum + ":" + znode);
             admin = new HBaseAdmin(config);
             try {
                 existingDesc = admin.getTableDescriptor(tableName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index d63a68f..bd38983 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -34,11 +34,13 @@ import javax.annotation.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.parse.WildcardParseNode;
@@ -49,8 +51,6 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import java.util.Iterator;
-import java.util.Map;
 
 public final class QueryUtil {
 
@@ -129,7 +129,7 @@ public final class QueryUtil {
      *
      * @param tableName name of the table for which the upsert statement is to be created
      * @param columns list of columns to be included in the upsert statement
-     * @param Hint hint to be added to the UPSERT statement.
+     * @param hint hint to be added to the UPSERT statement.
      * @return the created {@code UPSERT} statement
      */
     public static String constructUpsertStatement(String tableName, List<String> columns, Hint hint) {
@@ -222,13 +222,36 @@ public final class QueryUtil {
         return query.toString();
     }
 
-    public static String getUrl(String server) {
-        return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + server;
+    /**
+     * Create the Phoenix JDBC connection URL from the provided cluster connection details.
+     */
+    public static String getUrl(String zkQuorum) {
+        return getUrlInternal(zkQuorum, null, null);
+    }
+
+    /**
+     * Create the Phoenix JDBC connection URL from the provided cluster connection details.
+     */
+    public static String getUrl(String zkQuorum, int clientPort) {
+        return getUrlInternal(zkQuorum, clientPort, null);
+    }
+
+    /**
+     * Create the Phoenix JDBC connection URL from the provided cluster connection details.
+     */
+    public static String getUrl(String zkQuorum, String znodeParent) {
+        return getUrlInternal(zkQuorum, null, znodeParent);
+    }
+
+    /**
+     * Create the Phoenix JDBC connection URL from the provided cluster connection details.
+     */
+    public static String getUrl(String zkQuorum, int port, String znodeParent) {
+        return getUrlInternal(zkQuorum, port, znodeParent);
     }
 
-    public static String getUrl(String server, long port) {
-        String serverUrl = getUrl(server);
-        return serverUrl + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + port
+    private static String getUrlInternal(String zkQuorum, Integer port, String znodeParent) {
+        return new PhoenixEmbeddedDriver.ConnectionInfo(zkQuorum, port, znodeParent).toUrl()
                 + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
     }
 
@@ -274,6 +297,7 @@ public final class QueryUtil {
 
     public static String getConnectionUrl(Properties props, Configuration conf)
             throws ClassNotFoundException, SQLException {
+        // TODO: props is ignored!
         // make sure we load the phoenix driver
         Class.forName(PhoenixDriver.class.getName());
 
@@ -304,12 +328,15 @@ public final class QueryUtil {
         if (port == -1) {
             port = conf.getInt(QueryServices.ZOOKEEPER_PORT_ATTRIB, -1);
             if (port == -1) {
+                // TODO: fall back to the default in HConstants#DEFAULT_ZOOKEPER_CLIENT_PORT
                 throw new RuntimeException("Client zk port was not set!");
             }
         }
         server = Joiner.on(',').join(servers);
+        String znodeParent = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
+                HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
 
-        return getUrl(server, port);
+        return getUrl(server, port, znodeParent);
     }
     
     public static String getViewStatement(String schemaName, String tableName, String where) {
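
All four getUrl overloads funnel into getUrlInternal, so each variant yields a
ConnectionInfo-built URL ending in the protocol terminator. Expected shapes,
matching the updated QueryUtilTest later in this commit (quorum and znode are
hypothetical):

    QueryUtil.getUrl("zk1,zk2,zk3");                 // jdbc:phoenix:zk1,zk2,zk3;
    QueryUtil.getUrl("zk1,zk2,zk3", 2181);           // jdbc:phoenix:zk1,zk2,zk3:2181;
    QueryUtil.getUrl("zk1,zk2,zk3", "/hbase");       // jdbc:phoenix:zk1,zk2,zk3:/hbase;
    QueryUtil.getUrl("zk1,zk2,zk3", 2181, "/hbase"); // jdbc:phoenix:zk1,zk2,zk3:2181:/hbase;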

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
index 79f9ec6..083b205 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
@@ -40,9 +40,11 @@ public class PhoenixEmbeddedDriverTest {
             "jdbc:phoenix:localhost:123",
             "jdbc:phoenix:localhost:123;foo=bar",
             "jdbc:phoenix:localhost:123:/hbase",
-            "jdbc:phoenix:localhost:123:/hbase;foo=bas",
+            "jdbc:phoenix:localhost:123:/foo-bar",
+            "jdbc:phoenix:localhost:123:/foo-bar;foo=bas",
             "jdbc:phoenix:localhost:/hbase",
-            "jdbc:phoenix:localhost:/hbase;test=true",
+            "jdbc:phoenix:localhost:/foo-bar",
+            "jdbc:phoenix:localhost:/foo-bar;test=true",
             "jdbc:phoenix:v1,v2,v3",
             "jdbc:phoenix:v1,v2,v3;",
             "jdbc:phoenix:v1,v2,v3;test=true",
@@ -51,6 +53,7 @@ public class PhoenixEmbeddedDriverTest {
             "jdbc:phoenix:v1,v2,v3:123:/hbase",
             "jdbc:phoenix:v1,v2,v3:123:/hbase;test=false",
             "jdbc:phoenix:v1,v2,v3:123:/hbase:user/principal:/user.keytab;test=false",
+            "jdbc:phoenix:v1,v2,v3:123:/foo-bar:user/principal:/user.keytab;test=false",
             "jdbc:phoenix:v1,v2,v3:123:user/principal:/user.keytab;test=false",
             "jdbc:phoenix:v1,v2,v3:user/principal:/user.keytab;test=false",
             "jdbc:phoenix:v1,v2,v3:/hbase:user/principal:/user.keytab;test=false",
@@ -64,9 +67,11 @@ public class PhoenixEmbeddedDriverTest {
             new ConnectionInfo("localhost",123,null),
             new ConnectionInfo("localhost",123,null),
             new ConnectionInfo("localhost",123,"/hbase"),
-            new ConnectionInfo("localhost",123,"/hbase"),
-            new ConnectionInfo("localhost",null,"/hbase"),
+            new ConnectionInfo("localhost",123,"/foo-bar"),
+            new ConnectionInfo("localhost",123,"/foo-bar"),
             new ConnectionInfo("localhost",null,"/hbase"),
+            new ConnectionInfo("localhost",null,"/foo-bar"),
+            new ConnectionInfo("localhost",null,"/foo-bar"),
             new ConnectionInfo("v1,v2,v3",null,null),
             new ConnectionInfo("v1,v2,v3",null,null),
             new ConnectionInfo("v1,v2,v3",null,null),
@@ -75,6 +80,7 @@ public class PhoenixEmbeddedDriverTest {
             new ConnectionInfo("v1,v2,v3",123,"/hbase"),
             new ConnectionInfo("v1,v2,v3",123,"/hbase"),
             new ConnectionInfo("v1,v2,v3",123,"/hbase","user/principal", "/user.keytab" ),
+            new ConnectionInfo("v1,v2,v3",123,"/foo-bar","user/principal", "/user.keytab" ),
             new ConnectionInfo("v1,v2,v3",123, null,"user/principal", "/user.keytab" ),
             new ConnectionInfo("v1,v2,v3", null, null,"user/principal", "/user.keytab" ),
             new ConnectionInfo("v1,v2,v3",null,"/hbase","user/principal", "/user.keytab" ),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolTest.java
index 31fc71c..33bb976 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolTest.java
@@ -66,15 +66,4 @@ public class CsvBulkLoadToolTest {
     public void testGetQualifiedTableName_NullSchema() {
         assertEquals("MYTABLE", CsvBulkLoadTool.getQualifiedTableName(null, "myTable"));
     }
-
-    @Test
-    public void testGetJdbcUrl_WithQuorumSupplied() {
-        assertEquals("jdbc:phoenix:myzkhost:2181", bulkLoadTool.getJdbcUrl("myzkhost:2181"));
-    }
-
-    @Test
-    public void testGetJdbcUrl_NoQuorumSupplied() {
-        assertEquals("jdbc:phoenix:localhost:2181", bulkLoadTool.getJdbcUrl(null));
-    }
-
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java
index 4033a65..dc6f497 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java
@@ -95,21 +95,6 @@ public class CsvToKeyValueMapperTest {
     }
 
     @Test
-    public void testGetJdbcUrl() {
-        Configuration conf = new Configuration();
-        conf.set(HConstants.ZOOKEEPER_QUORUM, "myzkclient:2181");
-        String jdbcUrl = CsvToKeyValueMapper.getJdbcUrl(conf);
-
-        assertEquals("jdbc:phoenix:myzkclient:2181", jdbcUrl);
-    }
-
-    @Test(expected=IllegalStateException.class)
-    public void testGetJdbcUrl_NotConfigured() {
-        Configuration conf = new Configuration();
-        CsvToKeyValueMapper.getJdbcUrl(conf);
-    }
-
-    @Test
     public void testLoadPreUpdateProcessor() {
         Configuration conf = new Configuration();
         conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, MockUpsertProcessor.class,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afb0120e/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java
index beabaf1..8446e9e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java
@@ -17,10 +17,6 @@
  */
 package org.apache.phoenix.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import java.sql.Types;
 import java.util.Properties;
 
@@ -30,6 +26,8 @@ import org.junit.Test;
 
 import com.google.common.collect.ImmutableList;
 
+import static org.junit.Assert.*;
+
 public class QueryUtilTest {
 
     private static final ColumnInfo ID_COLUMN = new ColumnInfo("ID", Types.BIGINT);
@@ -96,19 +94,28 @@ public class QueryUtilTest {
     }
 
     private void validateUrl(String url) {
-        String prefix = QueryUtil.getUrl("");
+        String prefix = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
         assertTrue("JDBC URL missing jdbc protocol prefix", url.startsWith(prefix));
-        //remove the prefix, should only be left with server,server...:port
-        url = url.substring(prefix.length()+1);
-        // make sure only a single ':'
-        assertEquals("More than a single ':' in url: "+url, url.indexOf(PhoenixRuntime
-                .JDBC_PROTOCOL_SEPARATOR),
-                url.lastIndexOf(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR));
+        assertTrue("JDBC URL missing jdbc terminator suffix", url.endsWith(";"));
+        // remove the prefix, should only be left with server[,server...]:port:/znode
+        url = url.substring(prefix.length());
+        String[] splits = url.split(":");
+        assertTrue("zk details should contain at least server component", splits.length >= 1);
         // make sure that each server is comma separated
-        url = url.substring(0, url.indexOf(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR));
-        String[] servers = url.split(",");
+        String[] servers = splits[0].split(",");
         for(String server: servers){
             assertFalse("Found whitespace in server names for url: " + url, server.contains(" "));
         }
+        if (splits.length >= 2) {
+            // second bit is a port number, should not throw
+            try {
+                Integer.parseInt(splits[1]);
+            } catch (NumberFormatException e) {
+                fail(e.getMessage());
+            }
+        }
+        if (splits.length >= 3) {
+            assertTrue("znode parent is not an absolute path", splits[2].startsWith("/"));
+        }
     }
 }
\ No newline at end of file
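
For reference, the URL shape the rewritten validateUrl() accepts is jdbc:phoenix:server[,server...][:port][:/znode]; that is, protocol prefix, comma-separated quorum, optional numeric port, optional absolute znode parent, and a trailing ';'. A minimal standalone sketch of the same checks, assuming plain Java with no Phoenix dependencies (class and method names are illustrative only):

    public class JdbcUrlShapeCheck {
        // Mirrors the assertions in validateUrl() above.
        static void check(String url) {
            String prefix = "jdbc:phoenix:";
            if (!url.startsWith(prefix) || !url.endsWith(";")) {
                throw new IllegalArgumentException("bad prefix/terminator: " + url);
            }
            // Strip prefix and terminator, leaving server[,server...][:port][:/znode]
            String body = url.substring(prefix.length(), url.length() - 1);
            String[] parts = body.split(":");
            for (String server : parts[0].split(",")) {
                if (server.contains(" ")) {
                    throw new IllegalArgumentException("whitespace in server name: " + url);
                }
            }
            if (parts.length >= 2) {
                Integer.parseInt(parts[1]); // port component must be numeric
            }
            if (parts.length >= 3 && !parts[2].startsWith("/")) {
                throw new IllegalArgumentException("znode parent not absolute: " + parts[2]);
            }
        }

        public static void main(String[] args) {
            check("jdbc:phoenix:host1,host2:2181:/hbase;"); // passes all checks
        }
    }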


[23/50] [abbrv] phoenix git commit: PHOENIX-1964 - Pherf tests write output in module base directory

Posted by ma...@apache.org.
PHOENIX-1964 - Pherf tests write output in module base directory


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d3ff0798
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d3ff0798
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d3ff0798

Branch: refs/heads/calcite
Commit: d3ff0798f3e87bb489e3c91f7d11813503fe7861
Parents: 981ed47
Author: cmarcel <cm...@salesforce.com>
Authored: Tue May 19 15:54:52 2015 -0700
Committer: cmarcel <cm...@salesforce.com>
Committed: Wed May 20 13:36:34 2015 -0700

----------------------------------------------------------------------
 phoenix-pherf/config/pherf.properties           |  3 ++
 .../org/apache/phoenix/pherf/DataIngestIT.java  |  3 +-
 .../apache/phoenix/pherf/ResultBaseTestIT.java  | 45 ++++++++++++++++++
 .../java/org/apache/phoenix/pherf/Pherf.java    |  7 +--
 .../apache/phoenix/pherf/PherfConstants.java    | 50 +++++++++++++++++++-
 .../phoenix/pherf/loaddata/DataLoader.java      |  2 +-
 .../apache/phoenix/pherf/result/ResultUtil.java |  4 +-
 .../pherf/result/impl/CSVResultHandler.java     |  5 +-
 .../pherf/result/impl/ImageResultHandler.java   |  5 +-
 .../pherf/result/impl/XMLResultHandler.java     |  6 ++-
 .../apache/phoenix/pherf/util/ResourceList.java | 26 ----------
 .../pherf/workload/WorkloadExecutor.java        |  2 +-
 .../phoenix/pherf/ConfigurationParserTest.java  |  2 +-
 .../org/apache/phoenix/pherf/ResourceTest.java  |  8 ++--
 .../apache/phoenix/pherf/ResultBaseTest.java    | 44 +++++++++++++++++
 .../org/apache/phoenix/pherf/ResultTest.java    |  5 +-
 16 files changed, 168 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/config/pherf.properties
----------------------------------------------------------------------
diff --git a/phoenix-pherf/config/pherf.properties b/phoenix-pherf/config/pherf.properties
index 354707a..1142f9b5 100644
--- a/phoenix-pherf/config/pherf.properties
+++ b/phoenix-pherf/config/pherf.properties
@@ -29,3 +29,6 @@ pherf.default.dataloader.threadpool=0
 # When upserting, this is the max # of rows that will be inserted in a single commit
 pherf.default.dataloader.batchsize=1000
 
+# Directory where results from a scenario run will be written
+pherf.default.results.dir=RESULTS
+

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
index b29656d..2b56f43 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
@@ -18,7 +18,6 @@
 
 package org.apache.phoenix.pherf;
 
-import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
 import org.apache.phoenix.pherf.configuration.Column;
 import org.apache.phoenix.pherf.configuration.DataTypeMapping;
 import org.apache.phoenix.pherf.configuration.Scenario;
@@ -39,7 +38,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-public class DataIngestIT extends BaseHBaseManagedTimeIT {
+public class DataIngestIT extends ResultBaseTestIT {
     protected static PhoenixUtil util = new PhoenixUtil(true);
     static final String matcherScenario = ".*scenario/.*test.*xml";
     static final String matcherSchema = ".*datamodel/.*test.*sql";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
new file mode 100644
index 0000000..6e103b8
--- /dev/null
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   "License"); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
+import org.apache.phoenix.pherf.result.ResultUtil;
+import org.junit.BeforeClass;
+
+import java.util.Properties;
+
+public class ResultBaseTestIT extends BaseHBaseManagedTimeIT {
+    private static boolean isSetUpDone = false;
+
+    @BeforeClass
+    public static void setUp() throws Exception {
+        if (isSetUpDone) {
+            return;
+        }
+
+        ResultUtil util = new ResultUtil();
+        PherfConstants constants = PherfConstants.create();
+        Properties properties = constants.getProperties(PherfConstants.PHERF_PROPERTIES);
+        String dir = properties.getProperty("pherf.default.results.dir");
+        String targetDir = "target/" + dir;
+        properties.setProperty("pherf.default.results.dir", targetDir);
+        util.ensureBaseDirExists(targetDir);
+        isSetUpDone = true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index 0ccc49a..073c661 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -84,7 +84,7 @@ public class Pherf {
             System.exit(1);
         }
 
-        properties = getProperties();
+        properties = PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES);
         dropPherfTablesRegEx = command.getOptionValue("drop", null);
         monitor = command.hasOption("m");
         String monitorFrequency = (command.hasOption("m") && command.hasOption("monitorFrequency"))
@@ -192,9 +192,4 @@ public class Pherf {
             }
         }
     }
-
-    private static Properties getProperties() throws Exception {
-        ResourceList list = new ResourceList();
-        return list.getProperties();
-    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
index 22d18f6..493f5a8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
@@ -18,7 +18,13 @@
 
 package org.apache.phoenix.pherf;
 
+import java.io.InputStream;
+import java.util.Properties;
+
 public class PherfConstants {
+    private static PherfConstants instance = null;
+    private Properties properties = null;
+
     public static final int DEFAULT_THREAD_POOL_SIZE = 10;
     public static final int DEFAULT_BATCH_SIZE = 1000;
     public static final String DEFAULT_DATE_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS";
@@ -29,7 +35,7 @@ public class PherfConstants {
             ".*" + PherfConstants.RESOURCE_SCENARIO.substring(1) + ".*";
     public static final String SCHEMA_ROOT_PATTERN = ".*";
     public static final String PHERF_PROPERTIES = "pherf.properties";
-    public static final String RESULT_DIR = "RESULTS";
+//    public static final String RESULT_DIR = "RESULTS";
     public static final String EXPORT_DIR = "CSV_EXPORT";
     public static final String RESULT_PREFIX = "RESULT_";
     public static final String PATH_SEPARATOR = "/";
@@ -60,4 +66,46 @@ public class PherfConstants {
         PERFORMANCE,
         FUNCTIONAL
     }
+
+    private PherfConstants() {
+    }
+
+    public static PherfConstants create() {
+        if (instance == null) {
+            instance = new PherfConstants();
+        }
+        return instance;
+    }
+
+    public Properties getProperties(final String fileName) throws Exception {
+        if (properties != null) {
+            return properties;
+        }
+
+        properties = new Properties();
+        InputStream is = null;
+        try {
+            is = getClass().getClassLoader().getResourceAsStream(fileName);
+            properties.load(is);
+        } finally {
+            if (is != null) {
+                is.close();
+            }
+        }
+        return properties;
+    }
+
+    public String getProperty(String property) {
+        return getProperty(PherfConstants.PHERF_PROPERTIES, property);
+    }
+
+    public String getProperty(final String fileName, String property) {
+        String value = null;
+        try {
+            value = getProperties(fileName).getProperty(property);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        return value;
+    }
 }
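
In short, callers now resolve the results directory through the lazily-initialized PherfConstants singleton instead of the removed RESULT_DIR constant. A minimal usage sketch, based only on the API added above (the wrapper class is illustrative):

    import java.util.Properties;

    import org.apache.phoenix.pherf.PherfConstants;

    public class ResultDirExample {
        public static void main(String[] args) throws Exception {
            // Convenience accessor: loads pherf.properties from the classpath on first use.
            PherfConstants constants = PherfConstants.create();
            String resultDir = constants.getProperty("pherf.default.results.dir");

            // Long form: the cached Properties object behind the accessor.
            Properties props = constants.getProperties(PherfConstants.PHERF_PROPERTIES);
            System.out.println(resultDir.equals(props.getProperty("pherf.default.results.dir")));
        }
    }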

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
index abec1b4..c521822 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
@@ -67,7 +67,7 @@ public class DataLoader {
     }
 
     public DataLoader(PhoenixUtil phoenixUtil, XMLConfigParser parser) throws Exception{
-        this(phoenixUtil, new ResourceList().getProperties(), parser);
+        this(phoenixUtil, PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES), parser);
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
index 360cb49..fd960d1 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
@@ -115,7 +115,9 @@ public class ResultUtil {
     }
 
     public void ensureBaseResultDirExists() {
-        ensureBaseDirExists(PherfConstants.RESULT_DIR);
+        PherfConstants constants = PherfConstants.create();
+        String resultDir = constants.getProperty("pherf.default.results.dir");
+        ensureBaseDirExists(resultDir);
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
index ca470de..e7fbb48 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
@@ -53,8 +53,11 @@ public class CSVResultHandler implements ResultHandler {
 
     public CSVResultHandler(String resultFileName, ResultFileDetails resultFileDetails, boolean generateFullFileName) {
         this.util = new ResultUtil();
+        PherfConstants constants = PherfConstants.create();
+        String resultDir = constants.getProperty("pherf.default.results.dir");
+
         this.resultFileName = generateFullFileName ?
-                PherfConstants.RESULT_DIR + PherfConstants.PATH_SEPARATOR
+                resultDir + PherfConstants.PATH_SEPARATOR
                         + PherfConstants.RESULT_PREFIX
                         + resultFileName + util.getSuffix()
                         + resultFileDetails.getExtension().toString()

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
index 6e66cf6..ad3c8fb 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
@@ -44,8 +44,11 @@ public class ImageResultHandler implements ResultHandler {
 
     public ImageResultHandler(String resultFileName, ResultFileDetails resultFileDetails, boolean generateFullFileName) {
         ResultUtil util = new ResultUtil();
+        PherfConstants constants = PherfConstants.create();
+        String resultDir = constants.getProperty("pherf.default.results.dir");
+
         this.resultFileName = generateFullFileName ?
-                PherfConstants.RESULT_DIR + PherfConstants.PATH_SEPARATOR
+                resultDir + PherfConstants.PATH_SEPARATOR
                         + PherfConstants.RESULT_PREFIX
                         + resultFileName + util.getSuffix()
                         + resultFileDetails.getExtension().toString()

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
index a1d0930..8a913ed 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
@@ -30,6 +30,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Properties;
 
 public class XMLResultHandler implements ResultHandler {
     private final String resultFileName;
@@ -41,8 +42,11 @@ public class XMLResultHandler implements ResultHandler {
 
     public XMLResultHandler(String resultFileName, ResultFileDetails resultFileDetails, boolean generateFullFileName) {
         ResultUtil util = new ResultUtil();
+        PherfConstants constants = PherfConstants.create();
+        String resultDir = constants.getProperty("pherf.default.results.dir");
+
         this.resultFileName = generateFullFileName ?
-                PherfConstants.RESULT_DIR + PherfConstants.PATH_SEPARATOR
+                resultDir + PherfConstants.PATH_SEPARATOR
                         + PherfConstants.RESULT_PREFIX
                         + resultFileName + util.getSuffix()
                         + resultFileDetails.getExtension().toString()

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
index ac1e74d..5359c35 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
@@ -18,14 +18,12 @@
 
 package org.apache.phoenix.pherf.util;
 
-import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.exception.PherfException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStream;
 import java.net.URI;
 import java.net.URL;
 import java.nio.file.Path;
@@ -43,17 +41,11 @@ public class ResourceList {
     private static final Logger logger = LoggerFactory.getLogger(ResourceList.class);
     private final String rootResourceDir;
 
-    public ResourceList() {
-        this("/");
-    }
-
     public ResourceList(String rootResourceDir) {
         this.rootResourceDir = rootResourceDir;
     }
 
     public Collection<Path> getResourceList(final String pattern) throws Exception {
-        Properties properties = getProperties();
-
         // Include files from config directory
         Collection<Path> paths = getResourcesPaths(Pattern.compile(pattern));
 
@@ -112,24 +104,6 @@ public class ResourceList {
         return paths;
     }
 
-    public Properties getProperties() throws Exception {
-        return getProperties(PherfConstants.PHERF_PROPERTIES);
-    }
-
-    public Properties getProperties(final String fileName) throws Exception {
-        Properties pherfProps = new Properties();
-        InputStream is = null;
-        try {
-            is = getClass().getClassLoader().getResourceAsStream(fileName);
-            pherfProps.load(is);
-        } finally {
-            if (is != null) {
-                is.close();
-            }
-        }
-        return pherfProps;
-    }
-
     private Collection<String> getResources(
             final String element,
             final Pattern pattern) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
index 0509bdc..cf2f038 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
@@ -45,7 +45,7 @@ public class WorkloadExecutor {
 
 
     public WorkloadExecutor() throws Exception {
-        this(new ResourceList().getProperties());
+        this(PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES));
     }
 
     public WorkloadExecutor(Properties properties) throws Exception{

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
index fcc353e..f362842 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
@@ -38,7 +38,7 @@ import javax.xml.bind.Marshaller;
 
 import static org.junit.Assert.*;
 
-public class ConfigurationParserTest {
+public class ConfigurationParserTest extends ResultBaseTest {
     private static final Logger logger = LoggerFactory.getLogger(ConfigurationParserTest.class);
 
     @Test

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java
index 042f9c3..cd567cb 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java
@@ -48,11 +48,9 @@ public class ResourceTest {
 
     @Test
     public void testResourceListPropertyDirectory() throws Exception {
-
-        ResourceList list = new ResourceList();
-        Properties properties = list.getProperties();
-        assertTrue("Property file list was empty", properties.size() > 0);
-        assertNotNull(properties.getProperty("pherf.default.dataloader.threadpool"));
+        PherfConstants constants = PherfConstants.create();
+        assertNotNull(constants.getProperty("pherf.default.dataloader.threadpool"));
+        assertNotNull(constants.getProperty("pherf.default.results.dir"));
     }
 
     private Collection<Path> assertResources(String pattern, String rootDir, String assertStr) throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java
new file mode 100644
index 0000000..5c455fc
--- /dev/null
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   "License"); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import org.apache.phoenix.pherf.result.ResultUtil;
+import org.junit.BeforeClass;
+
+import java.util.Properties;
+
+public class ResultBaseTest {
+    private static boolean isSetUpDone = false;
+
+    @BeforeClass
+    public static void setUp() throws Exception {
+        if (isSetUpDone) {
+            return;
+        }
+
+        ResultUtil util = new ResultUtil();
+        PherfConstants constants = PherfConstants.create();
+        Properties properties = constants.getProperties(PherfConstants.PHERF_PROPERTIES);
+        String dir = properties.getProperty("pherf.default.results.dir");
+        String targetDir = "target/" + dir;
+        properties.setProperty("pherf.default.results.dir", targetDir);
+        util.ensureBaseDirExists(targetDir);
+        isSetUpDone = true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d3ff0798/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
index c51f0dc..a202437 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
@@ -33,11 +33,12 @@ import org.apache.phoenix.pherf.result.file.ResultFileDetails;
 import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
 import org.apache.phoenix.pherf.result.impl.XMLResultHandler;
 import org.apache.phoenix.pherf.result.*;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.phoenix.pherf.configuration.Query;
 
-public class ResultTest {
+public class ResultTest extends ResultBaseTest {
 
     @Test
     public void testMonitorWriter() throws Exception {
@@ -96,7 +97,7 @@ public class ResultTest {
         records = monitor.readResults();
 
         assertNotNull("Could not retrieve records", records);
-        assertEquals("Failed to get correct amount of CSV records.", records.size(), monitor.getRowCount());
+        assertTrue("Failed to get correct CSV records.", records.size() > 0);
         assertFalse("Monitor was not stopped correctly.", monitor.isRunning());
     }
 


[19/50] [abbrv] phoenix git commit: PHOENIX-1980 CsvBulkLoad cannot load hbase-site.xml from classpath

Posted by ma...@apache.org.
PHOENIX-1980 CsvBulkLoad cannot load hbase-site.xml from classpath


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6fc53b57
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6fc53b57
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6fc53b57

Branch: refs/heads/calcite
Commit: 6fc53b5792ea7bdd1b486860606966e76f2e5e3f
Parents: 23f5acf
Author: Nick Dimiduk <nd...@apache.org>
Authored: Mon May 18 10:33:42 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Mon May 18 10:33:42 2015 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6fc53b57/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
index 31f8b42..a5a8aa1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
@@ -176,7 +176,7 @@ public class CsvBulkLoadTool extends Configured implements Tool {
     @Override
     public int run(String[] args) throws Exception {
 
-        Configuration conf = HBaseConfiguration.addHbaseResources(getConf());
+        Configuration conf = HBaseConfiguration.create(getConf());
 
         CommandLine cmdLine = null;
         try {
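
The one-line change matters because HBaseConfiguration.create(getConf()) builds a fresh Configuration that first loads hbase-default.xml and hbase-site.xml from the classpath and then merges the tool's own settings over the top, whereas addHbaseResources() only appended those resources to the existing object. A minimal sketch of the fixed pattern, assuming hbase-site.xml is on the classpath (the property names below are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BulkLoadConfExample {
        public static void main(String[] args) {
            Configuration toolConf = new Configuration();
            toolConf.set("my.tool.flag", "x"); // e.g. a -D override passed to the tool

            // New Configuration: classpath hbase-default.xml/hbase-site.xml first,
            // then toolConf merged over it, so explicit settings still win.
            Configuration conf = HBaseConfiguration.create(toolConf);
            System.out.println(conf.get("hbase.zookeeper.quorum")); // from hbase-site.xml
            System.out.println(conf.get("my.tool.flag"));           // "x"
        }
    }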


[16/50] [abbrv] phoenix git commit: PHOENIX-1965 Upgrade Pig to version 0.13 (Prashant Kommireddi)

Posted by ma...@apache.org.
PHOENIX-1965 Upgrade Pig to version 0.13 (Prashant Kommireddi)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a1032fba
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a1032fba
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a1032fba

Branch: refs/heads/calcite
Commit: a1032fba34164b9ac9c62d2187302cdc0e8b2846
Parents: c1e5c71
Author: Jesse Yates <jy...@apache.org>
Authored: Wed May 13 10:00:52 2015 -0700
Committer: Jesse Yates <jy...@apache.org>
Committed: Thu May 14 12:52:57 2015 -0700

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a1032fba/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 23ac578..eec1f2a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -85,7 +85,7 @@
     <!-- Dependency versions -->
     <commons-cli.version>1.2</commons-cli.version>
     <hadoop.version>2.5.1</hadoop.version>
-    <pig.version>0.12.0</pig.version>
+    <pig.version>0.13.0</pig.version>
     <jackson.version>1.8.8</jackson.version>
     <antlr.version>3.5</antlr.version>
     <log4j.version>1.2.17</log4j.version>


[22/50] [abbrv] phoenix git commit: PHOENIX-1995 client uberjar doesn't support dfs

Posted by ma...@apache.org.
PHOENIX-1995 client uberjar doesn't support dfs


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/981ed472
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/981ed472
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/981ed472

Branch: refs/heads/calcite
Commit: 981ed472cb597440fe7c3a2aaa088b103f8f7352
Parents: a4b4e0e
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed May 20 12:29:36 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed May 20 12:55:23 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/src/build/client.xml | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/981ed472/phoenix-assembly/src/build/client.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/client.xml b/phoenix-assembly/src/build/client.xml
index 101ccd6..4bd4544 100644
--- a/phoenix-assembly/src/build/client.xml
+++ b/phoenix-assembly/src/build/client.xml
@@ -29,6 +29,16 @@
     <format>jar</format>
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
+
+  <containerDescriptorHandlers>
+    <containerDescriptorHandler>
+      <!--
+          aggregate SPI's so that things like HDFS FileSystem works in uberjar
+          http://docs.oracle.com/javase/tutorial/sound/SPI-intro.html
+      -->
+      <handlerName>metaInf-services</handlerName>
+    </containerDescriptorHandler>
+  </containerDescriptorHandlers>
   
   <componentDescriptors>
     <componentDescriptor>src/build/components-minimal.xml</componentDescriptor>
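
Background: several Hadoop jars ship the same META-INF/services/org.apache.hadoop.fs.FileSystem descriptor, and without the metaInf-services handler the assembly keeps only one of them, so java.util.ServiceLoader can no longer discover the hdfs:// implementation inside the uberjar. A small probe of the discovery mechanism the merged descriptors feed, assuming hadoop-hdfs is on the classpath:

    import java.util.ServiceLoader;

    public class FileSystemSpiProbe {
        public static void main(String[] args) throws Exception {
            // ServiceLoader concatenates every META-INF/services/<interface> file it
            // can see; in an uberjar those files must be merged, not overwritten,
            // for all providers to stay visible.
            Class<?> fs = Class.forName("org.apache.hadoop.fs.FileSystem");
            for (Object impl : ServiceLoader.load(fs)) {
                System.out.println(impl.getClass().getName()); // e.g. DistributedFileSystem
            }
        }
    }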


[40/50] [abbrv] phoenix git commit: PHOENIX-777 Support null value for fixed length ARRAY (Dumindu Buddhika)

Posted by ma...@apache.org.
PHOENIX-777 Support null value for fixed length ARRAY (Dumindu Buddhika)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7f6bf10b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7f6bf10b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7f6bf10b

Branch: refs/heads/calcite
Commit: 7f6bf10b2cc54279b9210772323dc8f4d2939a19
Parents: 9e686b7
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Jun 2 11:13:44 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Jun 2 11:13:44 2015 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/ArraysWithNullsIT.java      | 300 +++++++++++++++++++
 .../phoenix/compile/ExpressionCompiler.java     |   9 +-
 .../apache/phoenix/schema/types/PBinary.java    |   2 +-
 .../org/apache/phoenix/schema/types/PChar.java  |   5 +-
 .../org/apache/phoenix/schema/types/PDate.java  |   6 +-
 .../apache/phoenix/schema/types/PDecimal.java   |   3 +
 .../apache/phoenix/schema/types/PTimestamp.java |  17 +-
 .../phoenix/schema/types/PhoenixArray.java      |  51 ++--
 8 files changed, 358 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
new file mode 100644
index 0000000..b034193
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+
+import java.sql.*;
+
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.junit.Test;
+
+public class ArraysWithNullsIT extends BaseClientManagedTimeIT {
+
+    @Test
+    public void testArrayUpsertIntWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t1 ( k VARCHAR PRIMARY KEY, a INTEGER[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t1 VALUES('a',ARRAY[null,3,null])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t1 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("INTEGER",new Object[]{null,3,null});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+
+
+    @Test
+    public void testArrayUpsertVarcharWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t2 ( k VARCHAR PRIMARY KEY, a VARCHAR[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t2 VALUES('a',ARRAY['10',null])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t2 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("VARCHAR",new Object[]{"10",null});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertBigIntWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t3 ( k VARCHAR PRIMARY KEY, a BIGINT[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t3 VALUES('a',ARRAY[2,null,32335,4])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t3 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("BIGINT",new Object[]{(long)2,null,(long)32335,(long)4});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertFloatWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t4 ( k VARCHAR PRIMARY KEY, a FLOAT[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t4 VALUES('a',ARRAY[1.1,2.2,null,3.4])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t4 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("FLOAT",new Object[]{(float)1.1,(float)2.2,null,(float)3.4});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertSmallIntWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t5 ( k VARCHAR PRIMARY KEY, a SMALLINT[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t5 VALUES('a',ARRAY[123,456,null,456])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t5 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("SMALLINT",new Object[]{(short)123,(short)456,null,(short)456});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertTinyIntWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t6 ( k VARCHAR PRIMARY KEY, a TINYINT[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t6 VALUES('a',ARRAY[123,45,null,45])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t6 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("TINYINT",new Object[]{(byte)123,(byte)45,null,(byte)45});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertBooleanWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t7 ( k VARCHAR PRIMARY KEY, a BOOLEAN[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t7 VALUES('a',ARRAY[true,false,null,true])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t7 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("BOOLEAN",new Object[]{true,false,null,true});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertDoubleWithNulls() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t8 ( k VARCHAR PRIMARY KEY, a DOUBLE[])");
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t8 VALUES('a',ARRAY[1.2,2.3,null,3.4])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t8 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("DOUBLE",new Object[]{1.2,2.3,null,3.4});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+
+    }
+
+    @Test
+    public void testArrayUpsertDateWithNulls1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t9 ( k VARCHAR PRIMARY KEY, a DATE[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t9 VALUES('a',ARRAY[TO_DATE('2015-05-20 06:12:14.184'),null,TO_DATE('2015-05-20 06:12:14.184'),null])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t9 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("DATE",new Date[]{new Date(1432102334184l),new Date(0l),new Date(1432102334184l),new Date(0l)});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertDateWithNulls2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t10 ( k VARCHAR PRIMARY KEY, a DATE[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t10 VALUES('a',ARRAY[TO_DATE('1970-01-01 00:00:00.000'), TO_DATE('2015-05-20 06:12:14.184'),TO_DATE('2015-05-20 06:12:14.184')])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t10 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("DATE",new Date[]{new Date(0l), new Date(1432102334184l), new Date(1432102334184l)});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertTimeWithNulls1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t11 ( k VARCHAR PRIMARY KEY, a TIME[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t11 VALUES('a',ARRAY[TO_TIME('2015-05-20 06:12:14.184'),null,TO_TIME('2015-05-20 06:12:14.184'),null])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t11 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("TIME",new Time[]{new Time(1432102334184l),new Time(0l),new Time(1432102334184l),new Time(0l)});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertTimeWithNulls2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t12 ( k VARCHAR PRIMARY KEY, a TIME[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t12 VALUES('a',ARRAY[TO_TIME('1970-01-01 00:00:00.000'), TO_TIME('2015-05-20 06:12:14.184'),null,TO_TIME('2015-05-20 06:12:14.184'),null])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t12 where k = 'a'");
+        rs.next();
+        Array array = conn.createArrayOf("TIME",new Time[]{new Time(0l),new Time(1432102334184l),new Time(0l),new Time(1432102334184l),new Time(0l)});
+
+        assertEquals(rs.getArray(1),array);
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertTimeStampWithNulls1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t13 ( k VARCHAR PRIMARY KEY, a TIMESTAMP[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t13 VALUES('a',ARRAY[TO_TIMESTAMP('2015-05-20 06:12:14.184'),null,TO_TIMESTAMP('2015-05-20 06:12:14.184'),TO_TIMESTAMP('1970-01-01 00:00:00.000')])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t13 where k = 'a'");
+        rs.next();
+
+        assertEquals(rs.getArray(1),conn.createArrayOf("TIMESTAMP",new Timestamp[]{new Timestamp(1432102334184l),new Timestamp(0l),new Timestamp(1432102334184l),new Timestamp(0l)}));
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertTimeStampWithNulls2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t14 ( k VARCHAR PRIMARY KEY, a TIMESTAMP[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t14 VALUES('a',ARRAY[TO_TIMESTAMP('1970-01-01 00:00:00.000'),TO_TIMESTAMP('2015-05-20 06:12:14.184'),TO_TIMESTAMP('1970-01-01 00:00:00.000'),TO_TIMESTAMP('2015-05-20 06:12:14.184'),TO_TIMESTAMP('1970-01-01 00:00:00.000')])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t14 where k = 'a'");
+        rs.next();
+
+        assertEquals(rs.getArray(1),conn.createArrayOf("TIMESTAMP",new Timestamp[]{new Timestamp(0l),new Timestamp(1432102334184l),new Timestamp(0l),new Timestamp(1432102334184l),new Timestamp(0l)}));
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertCharWithNulls1() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t15 ( k VARCHAR PRIMARY KEY, a CHAR(15)[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t15 VALUES('a',ARRAY['foo',null,'fo','foo'])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t15 where k = 'a'");
+        rs.next();
+
+        assertEquals(rs.getArray(1),conn.createArrayOf("CHAR",new String[]{"foo","","fo","foo"}));
+        conn.close();
+    }
+
+    @Test
+    public void testArrayUpsertCharWithNulls2() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t16 ( k VARCHAR PRIMARY KEY, a CHAR(15)[])");
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t16 VALUES('a',ARRAY[null,'foo',null,'fo','foo'])");
+        stmt.execute();
+        conn.commit();
+
+        ResultSet rs = conn.createStatement().executeQuery("Select a from t16 where k = 'a'");
+        rs.next();
+
+        assertEquals(rs.getArray(1),conn.createArrayOf("CHAR",new String[]{"","foo","","fo","foo"}));
+        conn.close();
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 66c1b85..39baf7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -1271,8 +1271,13 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
             for (int i = 0; i < children.size(); i++) {
                 Expression child = children.get(i);
                 child.evaluate(null, ptr);
-                Object value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
-                elements[i] = LiteralExpression.newConstant(value, child.getDataType(), child.getDeterminism()).getValue();
+                Object value = null;
+                if (child.getDataType() == null) {
+                    value = arrayElemDataType.toObject(ptr, theArrayElemDataType, child.getSortOrder());
+                } else {
+                    value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder());
+                }
+                elements[i] = LiteralExpression.newConstant(value, theArrayElemDataType, child.getDeterminism()).getValue();
             }
             Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements);
             return LiteralExpression.newConstant(value,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
index d6d07fd..b397554 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
@@ -48,7 +48,7 @@ public class PBinary extends PDataType<byte[]> {
   public Object pad(Object object, Integer maxLength) {
     byte[] b = (byte[]) object;
     if (b == null) {
-      return null;
+      return new byte[maxLength];
     }
     if (b.length == maxLength) {
       return object;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
index 2effc38..c4d482c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.DataExceedsCapacityException;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.StringUtil;
 
 import com.google.common.base.Strings;
@@ -55,7 +56,7 @@ public class PChar extends PDataType<String> {
     public Object pad(Object object, Integer maxLength) {
       String s = (String) object;
       if (s == null) {
-        return s;
+        return Strings.padEnd("", maxLength, ' ');
       }
       if (s.length() == maxLength) {
         return object;
@@ -69,7 +70,7 @@ public class PChar extends PDataType<String> {
     @Override
     public byte[] toBytes(Object object) {
       if (object == null) {
-        throw newIllegalDataException(this + " may not be null");
+        return ByteUtil.EMPTY_BYTE_ARRAY;
       }
       byte[] b = PVarchar.INSTANCE.toBytes(object);
       if (b.length != ((String) object).length()) {
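
Taken together, the two PChar changes turn a null CHAR(n) element into n spaces (and an empty byte array) instead of an error, which is what lets the fixed-width array tests above read nulls back as empty strings. A standalone sketch of the padding rule, using Guava's Strings helper as the diff does (the wrapper below is illustrative and mirrors only the null branch):

    import com.google.common.base.Strings;

    public class CharPadExample {
        // Mirrors the new PChar.pad() null handling: null -> maxLength spaces.
        static String pad(String s, int maxLength) {
            if (s == null) {
                return Strings.padEnd("", maxLength, ' ');
            }
            return s.length() >= maxLength ? s : Strings.padEnd(s, maxLength, ' ');
        }

        public static void main(String[] args) {
            System.out.println("[" + pad(null, 5) + "]"); // [     ]
            System.out.println("[" + pad("fo", 5) + "]"); // [fo   ]
        }
    }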

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
index 947c30e..b10b1ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDate.java
@@ -39,9 +39,6 @@ public class PDate extends PDataType<Date> {
 
   @Override
   public byte[] toBytes(Object object) {
-    if (object == null) {
-      throw newIllegalDataException(this + " may not be null");
-    }
     byte[] bytes = new byte[getByteSize()];
     toBytes(object, bytes, 0);
     return bytes;
@@ -50,7 +47,8 @@ public class PDate extends PDataType<Date> {
   @Override
   public int toBytes(Object object, byte[] bytes, int offset) {
     if (object == null) {
-      throw newIllegalDataException(this + " may not be null");
+        getCodec().encodeLong(0l, bytes, offset);
+        return this.getByteSize();
     }
     getCodec().encodeLong(((java.util.Date) object).getTime(), bytes, offset);
     return this.getByteSize();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
index 656113c..199ed28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
@@ -393,6 +393,9 @@ public class PDecimal extends PRealNumber<BigDecimal> {
   @Override
   public String toStringLiteral(Object o, Format formatter) {
       if (formatter == null) {
+          if(o == null) {
+              return String.valueOf(o);
+          }
           return ((BigDecimal)o).toPlainString();
         }
         return super.toStringLiteral(o, formatter);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index 9a82cc0..d396adc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -39,9 +39,6 @@ public class PTimestamp extends PDataType<Timestamp> {
 
   @Override
   public byte[] toBytes(Object object) {
-    if (object == null) {
-      throw newIllegalDataException(this + " may not be null");
-    }
     byte[] bytes = new byte[getByteSize()];
     toBytes(object, bytes, 0);
     return bytes;
@@ -50,16 +47,18 @@ public class PTimestamp extends PDataType<Timestamp> {
   @Override
   public int toBytes(Object object, byte[] bytes, int offset) {
     if (object == null) {
-      throw newIllegalDataException(this + " may not be null");
+      PDate.INSTANCE.getCodec().encodeLong(0l, bytes, offset);
+      Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, 0);
+      return getByteSize();
     }
     java.sql.Timestamp value = (java.sql.Timestamp) object;
     PDate.INSTANCE.getCodec().encodeLong(value.getTime(), bytes, offset);
 
-            /*
-             * By not getting the stuff that got spilled over from the millis part,
-             * it leaves the timestamp's byte representation saner - 8 bytes of millis | 4 bytes of nanos.
-             * Also, it enables timestamp bytes to be directly compared with date/time bytes.
-             */
+    /*
+     * By not getting the stuff that got spilled over from the millis part,
+     * it leaves the timestamp's byte representation saner - 8 bytes of millis | 4 bytes of nanos.
+     * Also, it enables timestamp bytes to be directly compared with date/time bytes.
+     */
     Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, value.getNanos() % 1000000);
     return getByteSize();
   }
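
The encoded form stays a fixed 12 bytes: 8 bytes of epoch millis followed by 4 bytes holding nanos % 1000000, and a null timestamp now encodes as 12 zero bytes instead of throwing. A condensed sketch of that layout, assuming java.nio in place of HBase's Bytes utility and omitting the sort-order transform Phoenix's real codec applies:

    import java.nio.ByteBuffer;
    import java.sql.Timestamp;

    public class TimestampLayoutExample {
        // 8 bytes millis | 4 bytes (nanos % 1000000), as in PTimestamp.toBytes().
        static byte[] encode(Timestamp ts) {
            ByteBuffer buf = ByteBuffer.allocate(12);
            if (ts == null) {
                return buf.array(); // all zeros: the new null encoding
            }
            buf.putLong(ts.getTime());
            buf.putInt(ts.getNanos() % 1000000);
            return buf.array();
        }

        public static void main(String[] args) {
            System.out.println(encode(new Timestamp(1432102334184L)).length); // 12
        }
    }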

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f6bf10b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java
index 9ffac83..843c831 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java
@@ -23,6 +23,7 @@ import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Map;
 
+import com.google.common.base.Strings;
 import org.apache.phoenix.util.SQLCloseable;
 
 /**
@@ -54,11 +55,15 @@ public class PhoenixArray implements Array,SQLCloseable {
 	private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) {
         Object[] resizedElements = new Object[elements.length];
         for (int i = 0; i < elements.length; i++) {
-            int length = baseType.getMaxLength(elements[i]);
-            if (length == maxLength) {
-                resizedElements[i] = elements[i];
+            Integer length = baseType.getMaxLength(elements[i]);
+            if (length != null) {
+                if (length == maxLength) {
+                    resizedElements[i] = elements[i];
+                } else {
+                    resizedElements[i] = baseType.pad(elements[i], maxLength);
+                }
             } else {
-                resizedElements[i] = baseType.pad(elements[i],maxLength);
+                resizedElements[i] = baseType.pad(elements[i], maxLength);
             }
         }
         return resizedElements;
@@ -67,18 +72,25 @@ public class PhoenixArray implements Array,SQLCloseable {
 	    if (elements == null || elements.length == 0) {
 	        return elements;
 	    }
-	    Object element = elements[0];
-	    int maxLength = baseType.getMaxLength(element);
-	    boolean resizeElements = false;
-	    for (int i = 1; i < elements.length; i++) {
-	        int length = baseType.getMaxLength(elements[i]);
-	        if (length > maxLength) {
-	            maxLength = length;
-	            resizeElements = true;
-	        } else if (length < maxLength) {
-	            resizeElements = true;
-	        }
-	    }
+        int maxLength = 0;
+        boolean resizeElements = false;
+        for (int i = 0; i < elements.length; i++) {
+            Integer length = baseType.getMaxLength(elements[i]);
+            if (length != null) {
+                if (maxLength == 0){
+                    maxLength = length;
+                    continue;
+                }
+                if (length > maxLength) {
+                    maxLength = length;
+                    resizeElements = true;
+                } else if (length < maxLength) {
+                    resizeElements = true;
+                }
+            } else {
+                resizeElements = true;
+            }
+        }
 	    if (!resizeElements) {
 	        return elements;
 	    }
@@ -92,7 +104,12 @@ public class PhoenixArray implements Array,SQLCloseable {
 		    if (baseType.getByteSize() == null) {
     		    elements = coerceToEqualLength(baseType, elements);
     		    if (elements != null && elements.length > 0) {
-    		        this.maxLength = baseType.getMaxLength(elements[0]);
+                    for(int i = 0; i < elements.length; i++) {
+                        if(elements[i] != null) {
+                            maxLength = baseType.getMaxLength(elements[i]);
+                            break;
+                        }
+                    }
     		    }
 		    } else {
 		        maxLength = baseType.getByteSize();
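
The root cause in all three hunks is auto-unboxing: getMaxLength returns an Integer, and the old "int length = ..." threw a NullPointerException whenever an element produced a null length. The rewrite keeps the boxed type, seeds maxLength from the first non-null length, and forces a resize pass when any length is missing; the last hunk applies the same discipline when seeding this.maxLength, skipping null elements rather than blindly reading elements[0]. The scan, distilled:

    // Hedged distillation of the null-safe max-length scan; an Integer[] of
    // per-element lengths stands in for baseType.getMaxLength(elements[i]).
    class MaxLengthSketch {
        static int maxLengthIgnoringNulls(Integer[] lengths) {
            int maxLength = 0;
            for (Integer length : lengths) {
                if (length != null && length > maxLength) {
                    maxLength = length; // null lengths are skipped, not unboxed to an NPE
                }
            }
            return maxLength;
        }
    }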


[48/50] [abbrv] phoenix git commit: PHOENIX-2034 Update pre-commit branches

Posted by ma...@apache.org.
PHOENIX-2034 Update pre-commit branches


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bfb0eee7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bfb0eee7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bfb0eee7

Branch: refs/heads/calcite
Commit: bfb0eee75b2ce0edb931599a9b024d3cd7160ae9
Parents: 67fea16
Author: Nick Dimiduk <nd...@apache.org>
Authored: Fri Jun 12 10:06:59 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Fri Jun 12 10:06:59 2015 -0700

----------------------------------------------------------------------
 dev/test-patch.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bfb0eee7/dev/test-patch.properties
----------------------------------------------------------------------
diff --git a/dev/test-patch.properties b/dev/test-patch.properties
index 6a82eee..53f2ad4 100644
--- a/dev/test-patch.properties
+++ b/dev/test-patch.properties
@@ -27,7 +27,7 @@ MAX_LINE_LENGTH=100
 # All supported branches for testing with precommit build
 # be sure to consider branch name prefixes in the order, ie, 4.x should appear
 # before 4 since the latter is a prefix
-BRANCH_NAMES="3.0 3.2 4.0.1 4.2 4.3 4.x-HBase-0.98 master"
+BRANCH_NAMES="4.4-HBase-0.98 4.4-HBase-1.0 4.4-HBase-1.1 4.x-HBase-0.98 4.x-HBase-1.0 4.x-HBase-1.1 master"
 
 
 # All supported Hadoop versions that we want to test the compilation with
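
The ordering comment above exists because precommit tooling typically resolves a patch's target branch by taking the first listed name that matches, so a short name like "4" would shadow "4.x" if it came first. An illustration of first-match-wins resolution (determineBranch is an invented helper for illustration, not the actual dev/test-patch.sh logic):

    // Hedged illustration of why prefix order matters in BRANCH_NAMES;
    // determineBranch is an invented helper, not dev/test-patch.sh itself.
    class BranchMatchSketch {
        static String determineBranch(String patchTag, String[] branchNames) {
            for (String branch : branchNames) {
                if (patchTag.startsWith(branch)) {
                    return branch; // first match wins; longer prefixes must come first
                }
            }
            return "master";
        }

        public static void main(String[] args) {
            String[] names = {"4.x-HBase-0.98", "4.4-HBase-0.98", "master"};
            System.out.println(determineBranch("4.x-HBase-0.98", names));
        }
    }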


[06/50] [abbrv] phoenix git commit: PHOENIX-1908 TenantSpecificTablesDDLIT#testAddDropColumn is flaky (Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-1908 TenantSpecificTablesDDLIT#testAddDropColumn is flaky (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d223f2c3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d223f2c3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d223f2c3

Branch: refs/heads/calcite
Commit: d223f2c3997bcd8f85c8dcae3703ceb39036662d
Parents: 70de0cd
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Apr 30 19:05:57 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Apr 30 19:05:57 2015 +0530

----------------------------------------------------------------------
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d223f2c3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 3ee527a..e613007 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1795,7 +1795,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                                     // column, get lock and drop the index. If found as covered
                                     // column, delete from index (do this client side?).
                                     // In either case, invalidate index if the column is in it
-                                    PhoenixConnection connection = QueryUtil.getConnection(env.getConfiguration()).unwrap(PhoenixConnection.class);
+                                    PhoenixConnection connection = table.getIndexes().isEmpty() ? null : QueryUtil.getConnection(env.getConfiguration()).unwrap(PhoenixConnection.class);
                                     for (PTable index : table.getIndexes()) {
                                         try {
                                             IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
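
The flakiness came from unconditionally opening a server-side connection during a column drop even when the table had no indexes to maintain; making the connection conditional on a non-empty index list avoids that work entirely in the common case. The shape of the fix, as a general lazy-resource guard (a sketch with stand-in types, not the MetaDataEndpointImpl code):

    // Hedged sketch of the lazy-resource guard; AutoCloseable stands in for
    // PhoenixConnection and the maintenance work is reduced to stubs.
    import java.util.List;

    class LazyConnectionSketch {
        static void maintainIndexes(List<String> indexes) throws Exception {
            AutoCloseable connection = indexes.isEmpty() ? null : openConnection();
            try {
                for (String index : indexes) {
                    // non-null here: the loop body only runs when the list is non-empty
                    maintain(index, connection);
                }
            } finally {
                if (connection != null) connection.close();
            }
        }

        static AutoCloseable openConnection() { return () -> {}; }
        static void maintain(String index, AutoCloseable connection) {}
    }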


[02/50] [abbrv] phoenix git commit: PHOENIX-1856 Include min row key for each region in stats row - addendum (Ram)

Posted by ma...@apache.org.
PHOENIX-1856 Include min row key for each region in stats row - addendum (Ram)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/902cf0de
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/902cf0de
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/902cf0de

Branch: refs/heads/calcite
Commit: 902cf0de317db917ae320193ba51ec3588611ede
Parents: 864faba
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Apr 30 02:05:14 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Apr 30 02:05:14 2015 +0530

----------------------------------------------------------------------
 .../java/org/apache/phoenix/schema/stats/StatisticsCollector.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/902cf0de/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 382cead..8e41d4e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -169,7 +169,7 @@ public class StatisticsCollector {
             rowTracker = 
                     new ArrayList<GuidePostsInfo>();
         }
-        if (minKey == null) {
+        if (minKey == null && !results.isEmpty()) {
             Cell minCell = results.get(0);
             minKey = minCell.getRowArray();
             minKeyOffset =  minCell.getRowOffset();
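
Previously results.get(0) was guarded only by minKey == null, so an empty batch of cells raised an IndexOutOfBoundsException; the combined condition simply defers seeding the minimum row key until the first non-empty batch. Distilled:

    // Hedged distillation of the empty-batch guard; a byte[] list stands in
    // for the List<Cell> the collector actually receives.
    import java.util.List;

    class MinKeySketch {
        private byte[] minKey;

        void collect(List<byte[]> results) {
            if (minKey == null && !results.isEmpty()) {
                minKey = results.get(0); // seed only from a non-empty batch
            }
        }
    }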


[31/50] [abbrv] phoenix git commit: PHOENIX-2005 Connection utilities omit zk client port, parent znode (addendum)

Posted by ma...@apache.org.
PHOENIX-2005 Connection utilities omit zk client port, parent znode (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e493215b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e493215b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e493215b

Branch: refs/heads/calcite
Commit: e493215bff7057bad1a52efecca90384a1dd9412
Parents: afb0120
Author: Nick Dimiduk <nd...@apache.org>
Authored: Tue May 26 17:41:04 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Tue May 26 17:41:04 2015 -0700

----------------------------------------------------------------------
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java     |  2 +-
 .../java/org/apache/phoenix/util/QueryUtil.java |  2 +-
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java | 20 ++++++++++++++++++++
 3 files changed, 22 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e493215b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 2451603..3cfaacc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -209,7 +209,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, org.apache.phoeni
             url = url == null ? "" : url;
             url = url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)
                     ? url.substring(PhoenixRuntime.JDBC_PROTOCOL.length())
-                    : url;
+                    : PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + url;
             StringTokenizer tokenizer = new StringTokenizer(url, DELIMITERS, true);
             int nTokens = 0;
             String[] tokens = new String[5];
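
The one-line change makes bare connection strings parse the same as prefixed ones: stripping "jdbc:phoenix" from a prefixed URL leaves a leading ":", so prepending the same separator to an unprefixed URL hands the tokenizer an identical shape. A self-contained sketch of the normalization (the constants mirror PhoenixRuntime.JDBC_PROTOCOL and PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR):

    // Hedged sketch of the URL normalization; constants mirror the
    // PhoenixRuntime values referenced in the diff.
    class UrlNormalizeSketch {
        static final String PROTOCOL = "jdbc:phoenix";
        static final String SEPARATOR = ":";

        static String normalize(String url) {
            url = url == null ? "" : url;
            return url.startsWith(PROTOCOL)
                    ? url.substring(PROTOCOL.length()) // keeps the leading ":"
                    : SEPARATOR + url;                 // bare quorum gets the same shape
        }

        public static void main(String[] args) {
            System.out.println(normalize("jdbc:phoenix:localhost:123")); // :localhost:123
            System.out.println(normalize("localhost:123"));              // :localhost:123
        }
    }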

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e493215b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index bd38983..a2d4a91 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -290,7 +290,7 @@ public final class QueryUtil {
             throws ClassNotFoundException,
             SQLException {
         String url = getConnectionUrl(props, conf);
-        LOG.info("Creating connection with the jdbc url:" + url);
+        LOG.info("Creating connection with the jdbc url: " + url);
         PropertiesUtil.extractProperties(props, conf);
         return DriverManager.getConnection(url, props);
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e493215b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
index 083b205..4eda825 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
@@ -34,23 +34,33 @@ public class PhoenixEmbeddedDriverTest {
     @Test
     public void testGetConnectionInfo() throws SQLException {
         String[] urls = new String[] {
+            null,
+            "",
             "jdbc:phoenix",
             "jdbc:phoenix;test=true",
             "jdbc:phoenix:localhost",
+            "localhost",
+            "localhost;",
             "jdbc:phoenix:localhost:123",
             "jdbc:phoenix:localhost:123;foo=bar",
+            "localhost:123",
             "jdbc:phoenix:localhost:123:/hbase",
             "jdbc:phoenix:localhost:123:/foo-bar",
             "jdbc:phoenix:localhost:123:/foo-bar;foo=bas",
+            "localhost:123:/foo-bar",
             "jdbc:phoenix:localhost:/hbase",
             "jdbc:phoenix:localhost:/foo-bar",
             "jdbc:phoenix:localhost:/foo-bar;test=true",
+            "localhost:/foo-bar",
             "jdbc:phoenix:v1,v2,v3",
             "jdbc:phoenix:v1,v2,v3;",
             "jdbc:phoenix:v1,v2,v3;test=true",
+            "v1,v2,v3",
             "jdbc:phoenix:v1,v2,v3:/hbase",
             "jdbc:phoenix:v1,v2,v3:/hbase;test=true",
+            "v1,v2,v3:/foo-bar",
             "jdbc:phoenix:v1,v2,v3:123:/hbase",
+            "v1,v2,v3:123:/hbase",
             "jdbc:phoenix:v1,v2,v3:123:/hbase;test=false",
             "jdbc:phoenix:v1,v2,v3:123:/hbase:user/principal:/user.keytab;test=false",
             "jdbc:phoenix:v1,v2,v3:123:/foo-bar:user/principal:/user.keytab;test=false",
@@ -63,20 +73,30 @@ public class PhoenixEmbeddedDriverTest {
         ConnectionInfo[] infos = new ConnectionInfo[] {
             new ConnectionInfo(null,null,null),
             new ConnectionInfo(null,null,null),
+            new ConnectionInfo(null,null,null),
+            new ConnectionInfo(null,null,null),
+            new ConnectionInfo("localhost",null,null),
             new ConnectionInfo("localhost",null,null),
+            new ConnectionInfo("localhost",null,null),
+            new ConnectionInfo("localhost",123,null),
             new ConnectionInfo("localhost",123,null),
             new ConnectionInfo("localhost",123,null),
             new ConnectionInfo("localhost",123,"/hbase"),
             new ConnectionInfo("localhost",123,"/foo-bar"),
             new ConnectionInfo("localhost",123,"/foo-bar"),
+            new ConnectionInfo("localhost",123,"/foo-bar"),
             new ConnectionInfo("localhost",null,"/hbase"),
             new ConnectionInfo("localhost",null,"/foo-bar"),
             new ConnectionInfo("localhost",null,"/foo-bar"),
+            new ConnectionInfo("localhost",null,"/foo-bar"),
+            new ConnectionInfo("v1,v2,v3",null,null),
             new ConnectionInfo("v1,v2,v3",null,null),
             new ConnectionInfo("v1,v2,v3",null,null),
             new ConnectionInfo("v1,v2,v3",null,null),
             new ConnectionInfo("v1,v2,v3",null,"/hbase"),
             new ConnectionInfo("v1,v2,v3",null,"/hbase"),
+            new ConnectionInfo("v1,v2,v3",null,"/foo-bar"),
+            new ConnectionInfo("v1,v2,v3",123,"/hbase"),
             new ConnectionInfo("v1,v2,v3",123,"/hbase"),
             new ConnectionInfo("v1,v2,v3",123,"/hbase"),
             new ConnectionInfo("v1,v2,v3",123,"/hbase","user/principal", "/user.keytab" ),


[44/50] [abbrv] phoenix git commit: PHOENIX-1978 UDF ArgumentTypeMismatchException (Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-1978 UDF ArgumentTypeMismatchException (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b3ed60bb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b3ed60bb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b3ed60bb

Branch: refs/heads/calcite
Commit: b3ed60bb935a09c3ed07a6d77502136c9b8a6eef
Parents: e54c99d
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Fri Jun 5 09:02:31 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Fri Jun 5 09:02:31 2015 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 58 ++++++++++++++++++--
 phoenix-core/src/main/antlr3/PhoenixSQL.g       | 17 +++---
 2 files changed, 61 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3ed60bb/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index 868e19d..c6bd62f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -58,6 +58,8 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.FunctionAlreadyExistsException;
 import org.apache.phoenix.schema.FunctionNotFoundException;
 import org.apache.phoenix.schema.ValueRangeExcpetion;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -121,11 +123,31 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
                     .append("        ptr.set(PInteger.INSTANCE.toBytes((Integer)sum));\n")
                     .append("        return true;\n")
                     .append("    }\n").toString();
-
+    private static String ARRAY_INDEX_EVALUATE_METHOD =
+            new StringBuffer()
+                    .append("    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {\n")
+                    .append("        Expression indexExpr = children.get(1);\n")
+                    .append("        if (!indexExpr.evaluate(tuple, ptr)) {\n")
+                    .append("           return false;\n")
+                    .append("        } else if (ptr.getLength() == 0) {\n")
+                    .append("           return true;\n")
+                    .append("        }\n")
+                    .append("        // Use Codec to prevent Integer object allocation\n")
+                    .append("        int index = PInteger.INSTANCE.getCodec().decodeInt(ptr, indexExpr.getSortOrder());\n")
+                    .append("        if(index < 0) {\n")
+                    .append("           throw new ParseException(\"Index cannot be negative :\" + index);\n")
+                    .append("        }\n")
+                    .append("        Expression arrayExpr = children.get(0);\n")
+                    .append("        return PArrayDataType.positionAtArrayElement(tuple, ptr, index, arrayExpr, getDataType(),getMaxLength());\n")
+                    .append("    }\n").toString();
+    
+    
     private static String MY_REVERSE_CLASS_NAME = "MyReverse";
     private static String MY_SUM_CLASS_NAME = "MySum";
-    private static String MY_REVERSE_PROGRAM = getProgram(MY_REVERSE_CLASS_NAME, STRING_REVERSE_EVALUATE_METHOD, "PVarchar");
-    private static String MY_SUM_PROGRAM = getProgram(MY_SUM_CLASS_NAME, SUM_COLUMN_VALUES_EVALUATE_METHOD, "PInteger");
+    private static String MY_ARRAY_INDEX_CLASS_NAME = "MyArrayIndex";
+    private static String MY_REVERSE_PROGRAM = getProgram(MY_REVERSE_CLASS_NAME, STRING_REVERSE_EVALUATE_METHOD, "return PVarchar.INSTANCE;");
+    private static String MY_SUM_PROGRAM = getProgram(MY_SUM_CLASS_NAME, SUM_COLUMN_VALUES_EVALUATE_METHOD, "return PInteger.INSTANCE;");
+    private static String MY_ARRAY_INDEX_PROGRAM = getProgram(MY_ARRAY_INDEX_CLASS_NAME, ARRAY_INDEX_EVALUATE_METHOD, "return PDataType.fromTypeId(children.get(0).getDataType().getSqlType()- PDataType.ARRAY_TYPE_BASE);");
     private static Properties EMPTY_PROPS = new Properties();
     
 
@@ -144,6 +166,8 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
                 .append("import org.apache.phoenix.schema.types.PInteger;\n")
                 .append("import org.apache.phoenix.schema.types.PVarchar;\n")
                 .append("import org.apache.phoenix.util.StringUtil;\n")
+                .append("import org.apache.phoenix.schema.types.PArrayDataType;\n")
+                .append("import org.apache.phoenix.parse.ParseException;\n")
                 .append("public class "+className+" extends ScalarFunction{\n")
                 .append("    public static final String NAME = \"MY_REVERSE\";\n")
                 .append("    public "+className+"() {\n")
@@ -159,7 +183,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
                 .append("    }\n")
                 .append("  @Override\n")
                 .append("   public PDataType getDataType() {\n")
-                .append("       return "+returnType+".INSTANCE;\n")
+                .append(returnType+"\n")
                 .append("    }\n")
                 .append("    @Override\n")
                 .append("    public String getName() {\n")
@@ -181,6 +205,8 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
         UDFExpression.setConfig(conf);
         compileTestClass(MY_REVERSE_CLASS_NAME, MY_REVERSE_PROGRAM, 1);
         compileTestClass(MY_SUM_CLASS_NAME, MY_SUM_PROGRAM, 2);
+        compileTestClass(MY_ARRAY_INDEX_CLASS_NAME, MY_ARRAY_INDEX_PROGRAM, 3);
+
         String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
         url =
                 JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR
@@ -265,6 +291,28 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
         } catch(FunctionNotFoundException e) {
             
         }
+        conn.createStatement().execute("CREATE TABLE TESTTABLE10(ID VARCHAR NOT NULL, NAME VARCHAR ARRAY, CITY VARCHAR ARRAY CONSTRAINT pk PRIMARY KEY (ID) )");
+        conn.createStatement().execute("create function UDF_ARRAY_ELEM(VARCHAR ARRAY, INTEGER) returns VARCHAR as 'org.apache.phoenix.end2end."+MY_ARRAY_INDEX_CLASS_NAME+"' using jar "
+                + "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar3.jar"+"'");
+        conn.createStatement().execute("UPSERT INTO TESTTABLE10(ID,NAME,CITY) VALUES('111', ARRAY['JOHN','MIKE','BOB'], ARRAY['NYC','LA','SF'])");
+        conn.createStatement().execute("UPSERT INTO TESTTABLE10(ID,NAME,CITY) VALUES('112', ARRAY['CHEN','CARL','ALICE'], ARRAY['BOSTON','WASHINGTON','PALO ALTO'])");
+        conn.commit();
+        rs = conn.createStatement().executeQuery("SELECT ID, UDF_ARRAY_ELEM(NAME, 2) FROM TESTTABLE10");
+        assertTrue(rs.next());
+        assertEquals("111", rs.getString(1));
+        assertEquals("MIKE", rs.getString(2));
+        assertTrue(rs.next());
+        assertEquals("112", rs.getString(1));
+        assertEquals("CARL", rs.getString(2));
+        assertFalse(rs.next());
+        rs = conn2.createStatement().executeQuery("SELECT ID, UDF_ARRAY_ELEM(NAME, 2) FROM TESTTABLE10");
+        assertTrue(rs.next());
+        assertEquals("111", rs.getString(1));
+        assertEquals("MIKE", rs.getString(2));
+        assertTrue(rs.next());
+        assertEquals("112", rs.getString(1));
+        assertEquals("CARL", rs.getString(2));
+        assertFalse(rs.next());
     }
 
     @Test
@@ -326,7 +374,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
         rs = tenant2Conn.createStatement().executeQuery("select * from t7 where MYFUNCTION(k1)=12");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
-        assertEquals(2, rs.getInt(2));        
+        assertEquals(2, rs.getInt(2)); 
         assertEquals("jock", rs.getString(3));
         assertFalse(rs.next());
     }
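
Beyond the grammar refactor below, the new IT coverage shows the user-visible effect: CREATE FUNCTION can now declare array-typed arguments without an ArgumentTypeMismatchException. A condensed JDBC sketch of that flow (the connection URL, jar path, and class name are placeholders, not values from the commit):

    // Hedged condensation of the IT flow; URL, jar path and class name
    // are placeholders rather than values from the commit.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    class ArrayUdfSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                Statement stmt = conn.createStatement();
                stmt.execute("CREATE FUNCTION UDF_ARRAY_ELEM(VARCHAR ARRAY, INTEGER) "
                        + "RETURNS VARCHAR AS 'org.example.MyArrayIndex' "
                        + "USING JAR '/tmp/myjar3.jar'");
                ResultSet rs = stmt.executeQuery(
                        "SELECT ID, UDF_ARRAY_ELEM(NAME, 2) FROM TESTTABLE10");
                while (rs.next()) {
                    System.out.println(rs.getString(1) + " " + rs.getString(2));
                }
            }
        }
    }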

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3ed60bb/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index d2bb241..4f7cb34 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -917,17 +917,16 @@ zero_or_more_expressions returns [List<ParseNode> ret]
 
 zero_or_more_data_types returns [List<FunctionArgument> ret]
 @init{ret = new ArrayList<FunctionArgument>(); }
-    : (dt = identifier (LPAREN l=NUMBER (COMMA s=NUMBER)? RPAREN)? ar=ARRAY? (lsq=LSQUARE (a=NUMBER)? RSQUARE)? (c = CONSTANT)? (DEFAULTVALUE EQ dv = value_expression)? (MINVALUE EQ minv = value_expression)?  (MAXVALUE EQ maxv = value_expression)? 
-    {$ret.add(new FunctionArgument(dt,  ar != null || lsq != null, c!=null, 
-    dv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)dv).getValue()), 
-    minv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)minv).getValue()), 
-    maxv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)maxv).getValue())));})? (COMMA (dt = identifier (LPAREN l=NUMBER (COMMA s=NUMBER)? RPAREN)? ar=ARRAY? (lsq=LSQUARE (a=NUMBER)? RSQUARE)? (c = CONSTANT)? (DEFAULTVALUE EQ dv = value_expression)? (MINVALUE EQ minv = value_expression)?  (MAXVALUE EQ maxv = value_expression)?
-    {$ret.add(new FunctionArgument(dt,  ar != null || lsq != null, c!=null, 
+    : (fa = function_argument {$ret.add(fa);})? (COMMA fa = function_argument {$ret.add(fa);})* 
+	;
+
+function_argument returns [FunctionArgument ret]
+	: (dt = identifier (LPAREN l=NUMBER (COMMA s=NUMBER)? RPAREN)? ar=ARRAY? (lsq=LSQUARE (a=NUMBER)? RSQUARE)? (c = CONSTANT)? (DEFAULTVALUE EQ dv = value_expression)? (MINVALUE EQ minv = value_expression)?  (MAXVALUE EQ maxv = value_expression)? 
+	{ $ret = new FunctionArgument(dt,  ar != null || lsq != null, c!=null, 
     dv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)dv).getValue()), 
     minv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)minv).getValue()), 
-    maxv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)maxv).getValue())));} ))*
-;
-
+    maxv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)maxv).getValue()));})
+	;
 value_expression_list returns [List<ParseNode> ret]
 @init{ret = new ArrayList<ParseNode>(); }
     :  LPAREN e = value_expression {$ret.add(e);}  (COMMA e = value_expression {$ret.add(e);} )* RPAREN


[25/50] [abbrv] phoenix git commit: PHOENIX-1996 Use ByteStringer instead of ZeroCopyByteString

Posted by ma...@apache.org.
PHOENIX-1996 Use ByteStringer instead of ZeroCopyByteString


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/286ff26d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/286ff26d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/286ff26d

Branch: refs/heads/calcite
Commit: 286ff26d82b2638dc5d3db850fa6f4537ab6153f
Parents: c2fed1d
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed May 20 14:17:47 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Thu May 21 09:25:34 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/cache/ServerCacheClient.java | 10 +++----
 .../phoenix/coprocessor/MetaDataProtocol.java   |  9 +++----
 .../org/apache/phoenix/parse/PFunction.java     |  5 ++--
 .../apache/phoenix/protobuf/ProtobufUtil.java   |  4 +--
 .../query/ConnectionQueryServicesImpl.java      | 18 ++++++-------
 .../org/apache/phoenix/schema/PColumnImpl.java  |  8 +++---
 .../org/apache/phoenix/schema/PTableImpl.java   | 28 ++++++++++----------
 7 files changed, 40 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 1233e1c..9718709 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.coprocessor.ServerCachingProtocol.ServerCacheFactory;
@@ -68,7 +69,6 @@ import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
 
 import com.google.common.collect.ImmutableSet;
-import com.google.protobuf.HBaseZeroCopyByteString;
 
 /**
  * 
@@ -194,9 +194,9 @@ public class ServerCacheClient {
                                                             new BlockingRpcCallback<AddServerCacheResponse>();
                                                     AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder();
                                                     if(connection.getTenantId() != null){
-                                                        builder.setTenantId(HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
+                                                        builder.setTenantId(ByteStringer.wrap(connection.getTenantId().getBytes()));
                                                     }
-                                                    builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
+                                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                                     builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                                     ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                                     svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
@@ -307,9 +307,9 @@ public class ServerCacheClient {
     									new BlockingRpcCallback<RemoveServerCacheResponse>();
     							RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
     							if(connection.getTenantId() != null){
-    								builder.setTenantId(HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
+    								builder.setTenantId(ByteStringer.wrap(connection.getTenantId().getBytes()));
     							}
-    							builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
+    							builder.setCacheId(ByteStringer.wrap(cacheId));
     							instance.removeServerCache(controller, builder.build(), rpcCallback);
     							if(controller.getFailedOn() != null) {
     								throw controller.getFailedOn();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 2cca4bc..3867e00 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.PFunctionProtos;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
@@ -34,8 +35,6 @@ import org.apache.phoenix.util.ByteUtil;
 
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
-import com.google.protobuf.HBaseZeroCopyByteString;
-
 
 /**
  *
@@ -224,14 +223,14 @@ public abstract class MetaDataProtocol extends MetaDataService {
             }
             if (result.getTableNamesToDelete() != null) {
               for (byte[] tableName : result.tableNamesToDelete) {
-                builder.addTablesToDelete(HBaseZeroCopyByteString.wrap(tableName));
+                builder.addTablesToDelete(ByteStringer.wrap(tableName));
               }
             }
             if(result.getColumnName() != null){
-              builder.setColumnName(HBaseZeroCopyByteString.wrap(result.getColumnName()));
+              builder.setColumnName(ByteStringer.wrap(result.getColumnName()));
             }
             if(result.getFamilyName() != null){
-              builder.setFamilyName(HBaseZeroCopyByteString.wrap(result.getFamilyName()));
+              builder.setFamilyName(ByteStringer.wrap(result.getFamilyName()));
             }
           }
           return builder.build();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
index 351bec7..f4bac35 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
@@ -22,6 +22,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.phoenix.coprocessor.generated.PFunctionProtos;
 import org.apache.phoenix.coprocessor.generated.PFunctionProtos.PFunctionArg;
 import org.apache.phoenix.expression.LiteralExpression;
@@ -31,8 +32,6 @@ import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.util.SizedUtil;
 
-import com.google.protobuf.HBaseZeroCopyByteString;
-
 public class PFunction implements PMetaDataEntity {
 
     private PName tenantId = null;
@@ -193,7 +192,7 @@ public class PFunction implements PMetaDataEntity {
     public static PFunctionProtos.PFunction toProto(PFunction function) {
         PFunctionProtos.PFunction.Builder builder = PFunctionProtos.PFunction.newBuilder();
         if(function.getTenantId() != null){
-          builder.setTenantId(HBaseZeroCopyByteString.wrap(function.getTenantId().getBytes()));
+          builder.setTenantId(ByteStringer.wrap(function.getTenantId().getBytes()));
         }
         builder.setFunctionName(function.getFunctionName());
         builder.setClassname(function.getClassName());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
index f4a60bc..5df0492 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java
@@ -30,10 +30,10 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.protobuf.ByteString;
-import com.google.protobuf.HBaseZeroCopyByteString;
 import com.google.protobuf.RpcController;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.ServerCachingProtos;
@@ -131,7 +131,7 @@ public class ProtobufUtil {
     public static ServerCachingProtos.ImmutableBytesWritable toProto(ImmutableBytesWritable w) {
         ServerCachingProtos.ImmutableBytesWritable.Builder builder = 
         		ServerCachingProtos.ImmutableBytesWritable.newBuilder();
-        builder.setByteArray(HBaseZeroCopyByteString.wrap(w.get()));
+        builder.setByteArray(ByteStringer.wrap(w.get()));
         builder.setOffset(w.getOffset());
         builder.setLength(w.getLength());
         return builder.build();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 30b43d5..c86ea48 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
 import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.VersionInfo;
@@ -165,7 +166,6 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-import com.google.protobuf.HBaseZeroCopyByteString;
 
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
@@ -1281,9 +1281,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     BlockingRpcCallback<MetaDataResponse> rpcCallback =
                             new BlockingRpcCallback<MetaDataResponse>();
                     GetTableRequest.Builder builder = GetTableRequest.newBuilder();
-                    builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantIdBytes));
-                    builder.setSchemaName(HBaseZeroCopyByteString.wrap(schemaBytes));
-                    builder.setTableName(HBaseZeroCopyByteString.wrap(tableBytes));
+                    builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
+                    builder.setSchemaName(ByteStringer.wrap(schemaBytes));
+                    builder.setTableName(ByteStringer.wrap(tableBytes));
                     builder.setTableTimestamp(tableTimestamp);
                     builder.setClientTimestamp(clientTimestamp);
 
@@ -2353,9 +2353,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                                 ServerRpcController controller = new ServerRpcController();
                                 BlockingRpcCallback<ClearTableFromCacheResponse> rpcCallback = new BlockingRpcCallback<ClearTableFromCacheResponse>();
                                 ClearTableFromCacheRequest.Builder builder = ClearTableFromCacheRequest.newBuilder();
-                                builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantId));
-                                builder.setTableName(HBaseZeroCopyByteString.wrap(tableName));
-                                builder.setSchemaName(HBaseZeroCopyByteString.wrap(schemaName));
+                                builder.setTenantId(ByteStringer.wrap(tenantId));
+                                builder.setTableName(ByteStringer.wrap(tableName));
+                                builder.setSchemaName(ByteStringer.wrap(schemaName));
                                 builder.setClientTimestamp(clientTS);
                                 instance.clearTableFromCache(controller, builder.build(), rpcCallback);
                                 if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
@@ -2639,9 +2639,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     BlockingRpcCallback<MetaDataResponse> rpcCallback =
                             new BlockingRpcCallback<MetaDataResponse>();
                     GetFunctionsRequest.Builder builder = GetFunctionsRequest.newBuilder();
-                    builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantIdBytes));
+                    builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                     for(Pair<byte[], Long> function: functions) {
-                        builder.addFunctionNames(HBaseZeroCopyByteString.wrap(function.getFirst()));
+                        builder.addFunctionNames(ByteStringer.wrap(function.getFirst()));
                         builder.addFunctionTimestamps(function.getSecond().longValue());
                     }
                     builder.setClientTimestamp(clientTimestamp);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
index ac044df..4efb145 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
@@ -17,13 +17,13 @@
  */
 package org.apache.phoenix.schema;
 
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.phoenix.coprocessor.generated.PTableProtos;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.SizedUtil;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.HBaseZeroCopyByteString;
 
 public class PColumnImpl implements PColumn {
     private PName name;
@@ -238,9 +238,9 @@ public class PColumnImpl implements PColumn {
 
     public static PTableProtos.PColumn toProto(PColumn column) {
         PTableProtos.PColumn.Builder builder = PTableProtos.PColumn.newBuilder();
-        builder.setColumnNameBytes(HBaseZeroCopyByteString.wrap(column.getName().getBytes()));
+        builder.setColumnNameBytes(ByteStringer.wrap(column.getName().getBytes()));
         if (column.getFamilyName() != null) {
-            builder.setFamilyNameBytes(HBaseZeroCopyByteString.wrap(column.getFamilyName().getBytes()));
+            builder.setFamilyNameBytes(ByteStringer.wrap(column.getFamilyName().getBytes()));
         }
         builder.setDataType(column.getDataType().getSqlTypeName());
         if (column.getMaxLength() != null) {
@@ -256,7 +256,7 @@ public class PColumnImpl implements PColumn {
             builder.setArraySize(column.getArraySize());
         }
         if (column.getViewConstant() != null) {
-            builder.setViewConstant(HBaseZeroCopyByteString.wrap(column.getViewConstant()));
+            builder.setViewConstant(ByteStringer.wrap(column.getViewConstant()));
         }
         builder.setViewReferenced(column.isViewReferenced());
         

http://git-wip-us.apache.org/repos/asf/phoenix/blob/286ff26d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index bf4420c..9a2ae7f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.generated.PGuidePostsProtos;
 import org.apache.phoenix.coprocessor.generated.PGuidePostsProtos.PGuidePosts;
@@ -71,7 +72,6 @@ import com.google.common.collect.ImmutableSortedMap;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.protobuf.HBaseZeroCopyByteString;
 import com.sun.istack.NotNull;
 
 /**
@@ -984,10 +984,10 @@ public class PTableImpl implements PTable {
     public static PTableProtos.PTable toProto(PTable table) {
       PTableProtos.PTable.Builder builder = PTableProtos.PTable.newBuilder();
       if(table.getTenantId() != null){
-        builder.setTenantId(HBaseZeroCopyByteString.wrap(table.getTenantId().getBytes()));
+        builder.setTenantId(ByteStringer.wrap(table.getTenantId().getBytes()));
       }
-      builder.setSchemaNameBytes(HBaseZeroCopyByteString.wrap(table.getSchemaName().getBytes()));
-      builder.setTableNameBytes(HBaseZeroCopyByteString.wrap(table.getTableName().getBytes()));
+      builder.setSchemaNameBytes(ByteStringer.wrap(table.getSchemaName().getBytes()));
+      builder.setTableNameBytes(ByteStringer.wrap(table.getTableName().getBytes()));
       builder.setTableType(ProtobufUtil.toPTableTypeProto(table.getType()));
       if (table.getType() == PTableType.INDEX) {
     	if(table.getIndexState() != null) {
@@ -997,14 +997,14 @@ public class PTableImpl implements PTable {
     	  builder.setViewIndexId(table.getViewIndexId());
     	}
     	if(table.getIndexType() != null) {
-    	    builder.setIndexType(HBaseZeroCopyByteString.wrap(new byte[]{table.getIndexType().getSerializedValue()}));
+    	    builder.setIndexType(ByteStringer.wrap(new byte[]{table.getIndexType().getSerializedValue()}));
     	}
       }
       builder.setSequenceNumber(table.getSequenceNumber());
       builder.setTimeStamp(table.getTimeStamp());
       PName tmp = table.getPKName();
       if (tmp != null) {
-        builder.setPkNameBytes(HBaseZeroCopyByteString.wrap(tmp.getBytes()));
+        builder.setPkNameBytes(ByteStringer.wrap(tmp.getBytes()));
       }
       Integer bucketNum = table.getBucketNum();
       int offset = 0;
@@ -1029,14 +1029,14 @@ public class PTableImpl implements PTable {
 
       for (Map.Entry<byte[], GuidePostsInfo> entry : table.getTableStats().getGuidePosts().entrySet()) {
          PTableProtos.PTableStats.Builder statsBuilder = PTableProtos.PTableStats.newBuilder();
-         statsBuilder.setKey(HBaseZeroCopyByteString.wrap(entry.getKey()));
+         statsBuilder.setKey(ByteStringer.wrap(entry.getKey()));
          for (byte[] stat : entry.getValue().getGuidePosts()) {
-             statsBuilder.addValues(HBaseZeroCopyByteString.wrap(stat));
+             statsBuilder.addValues(ByteStringer.wrap(stat));
          }
          statsBuilder.setGuidePostsByteCount(entry.getValue().getByteCount());
          PGuidePostsProtos.PGuidePosts.Builder guidePstsBuilder = PGuidePostsProtos.PGuidePosts.newBuilder();
          for (byte[] stat : entry.getValue().getGuidePosts()) {
-             guidePstsBuilder.addGuidePosts(HBaseZeroCopyByteString.wrap(stat));
+             guidePstsBuilder.addGuidePosts(ByteStringer.wrap(stat));
          }
          guidePstsBuilder.setByteCount(entry.getValue().getByteCount());
          guidePstsBuilder.setRowCount(entry.getValue().getRowCount());
@@ -1046,21 +1046,21 @@ public class PTableImpl implements PTable {
       builder.setStatsTimeStamp(table.getTableStats().getTimestamp());
 
       if (table.getParentName() != null) {
-        builder.setDataTableNameBytes(HBaseZeroCopyByteString.wrap(table.getParentTableName().getBytes()));
+        builder.setDataTableNameBytes(ByteStringer.wrap(table.getParentTableName().getBytes()));
       }
       if (table.getDefaultFamilyName()!= null) {
-        builder.setDefaultFamilyName(HBaseZeroCopyByteString.wrap(table.getDefaultFamilyName().getBytes()));
+        builder.setDefaultFamilyName(ByteStringer.wrap(table.getDefaultFamilyName().getBytes()));
       }
       builder.setDisableWAL(table.isWALDisabled());
       builder.setMultiTenant(table.isMultiTenant());
       builder.setStoreNulls(table.getStoreNulls());
       if(table.getType() == PTableType.VIEW){
-        builder.setViewType(HBaseZeroCopyByteString.wrap(new byte[]{table.getViewType().getSerializedValue()}));
-        builder.setViewStatement(HBaseZeroCopyByteString.wrap(PVarchar.INSTANCE.toBytes(table.getViewStatement())));
+        builder.setViewType(ByteStringer.wrap(new byte[]{table.getViewType().getSerializedValue()}));
+        builder.setViewStatement(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getViewStatement())));
       }
       if(table.getType() == PTableType.VIEW || table.getViewIndexId() != null){
         for (int i = 0; i < table.getPhysicalNames().size(); i++) {
-          builder.addPhysicalNames(HBaseZeroCopyByteString.wrap(table.getPhysicalNames().get(i).getBytes()));
+          builder.addPhysicalNames(ByteStringer.wrap(table.getPhysicalNames().get(i).getBytes()));
         }
       }
 


[09/50] [abbrv] phoenix git commit: PHOENIX-1948 bin scripts run under make_rc.sh packaging

Posted by ma...@apache.org.
PHOENIX-1948 bin scripts run under make_rc.sh packaging


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/45a919f3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/45a919f3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/45a919f3

Branch: refs/heads/calcite
Commit: 45a919f380a2743bdcf3838da2cd9873c3f518c0
Parents: b47dcb6
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed May 6 09:58:35 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed May 6 09:58:35 2015 -0700

----------------------------------------------------------------------
 bin/phoenix_utils.py | 142 +++++++++++++++++++++++++++-------------------
 1 file changed, 84 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/45a919f3/bin/phoenix_utils.py
----------------------------------------------------------------------
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index 2cf7db7..383e0e1 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -41,7 +41,8 @@ def find(pattern, classPaths):
     return ""
 
 def findFileInPathWithoutRecursion(pattern, path):
-
+    if not os.path.exists(path):
+        return ""
     files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))]
     # sort the file names so *-client always precedes *-thin-client
     files.sort()
@@ -52,63 +53,71 @@ def findFileInPathWithoutRecursion(pattern, path):
     return ""
 
 def setPath():
- PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar"
- PHOENIX_THIN_CLIENT_JAR_PATTERN = "phoenix-*-thin-client.jar"
- PHOENIX_QUERYSERVER_JAR_PATTERN = "phoenix-server-*-runnable.jar"
- PHOENIX_TESTS_JAR_PATTERN = "phoenix-core-*-tests*.jar"
- global current_dir
- current_dir = os.path.dirname(os.path.abspath(__file__))
- global phoenix_jar_path
- phoenix_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*")
- global phoenix_client_jar
- phoenix_client_jar = find("phoenix-*-client.jar", phoenix_jar_path)
- global phoenix_test_jar_path
- phoenix_test_jar_path = os.path.join(current_dir, "..", "phoenix-core", "target","*")
- global hadoop_common_jar_path
- hadoop_common_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*")
- global hadoop_common_jar
- hadoop_common_jar = find("hadoop-common*.jar", hadoop_common_jar_path)
- global hadoop_hdfs_jar_path
- hadoop_hdfs_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*")
- global hadoop_hdfs_jar
- hadoop_hdfs_jar = find("hadoop-hdfs*.jar", hadoop_hdfs_jar_path)
-
- global hbase_conf_dir
- hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH', '.'))
- global hbase_conf_path # keep conf_path around for backward compatibility
- hbase_conf_path = hbase_conf_dir
- global testjar
- testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_test_jar_path)
- global phoenix_queryserver_jar
- phoenix_queryserver_jar = find(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-server", "target", "*"))
- global phoenix_thin_client_jar
- phoenix_thin_client_jar = find(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-server-client", "target", "*"))
-
- if phoenix_client_jar == "":
-     phoenix_client_jar = findFileInPathWithoutRecursion(PHOENIX_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
-
- if phoenix_thin_client_jar == "":
-     phoenix_thin_client_jar = findFileInPathWithoutRecursion(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
-
- if phoenix_queryserver_jar == "":
-     phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
-
- if testjar == "":
-     testjar = findFileInPathWithoutRecursion(PHOENIX_TESTS_JAR_PATTERN, os.path.join(current_dir, ".."))
-
- # Backward support old env variable PHOENIX_LIB_DIR replaced by PHOENIX_CLASS_PATH
- global phoenix_class_path
- phoenix_class_path = os.getenv('PHOENIX_LIB_DIR','')
- if phoenix_class_path == "":
-     phoenix_class_path = os.getenv('PHOENIX_CLASS_PATH','')
-
- if phoenix_client_jar == "":
-     phoenix_client_jar = find(PHOENIX_CLIENT_JAR_PATTERN, phoenix_class_path)
-
- if testjar == "":
-     testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_class_path)
-
- return ""
+    PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar"
+    PHOENIX_THIN_CLIENT_JAR_PATTERN = "phoenix-*-thin-client.jar"
+    PHOENIX_QUERYSERVER_JAR_PATTERN = "phoenix-server-*-runnable.jar"
+    PHOENIX_TESTS_JAR_PATTERN = "phoenix-core-*-tests*.jar"
+
+    # Backward support old env variable PHOENIX_LIB_DIR replaced by PHOENIX_CLASS_PATH
+    global phoenix_class_path
+    phoenix_class_path = os.getenv('PHOENIX_LIB_DIR','')
+    if phoenix_class_path == "":
+        phoenix_class_path = os.getenv('PHOENIX_CLASS_PATH','')
+
+    global hbase_conf_dir
+    hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH', '.'))
+    global hbase_conf_path # keep conf_path around for backward compatibility
+    hbase_conf_path = hbase_conf_dir
+
+    global current_dir
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+
+    global phoenix_jar_path
+    phoenix_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*")
+
+    global phoenix_client_jar
+    phoenix_client_jar = find("phoenix-*-client.jar", phoenix_jar_path)
+    if phoenix_client_jar == "":
+        phoenix_client_jar = findFileInPathWithoutRecursion(PHOENIX_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
+    if phoenix_client_jar == "":
+        phoenix_client_jar = find(PHOENIX_CLIENT_JAR_PATTERN, phoenix_class_path)
+
+    global phoenix_test_jar_path
+    phoenix_test_jar_path = os.path.join(current_dir, "..", "phoenix-core", "target","*")
+
+    global hadoop_common_jar_path
+    hadoop_common_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*")
+
+    global hadoop_common_jar
+    hadoop_common_jar = find("hadoop-common*.jar", hadoop_common_jar_path)
+
+    global hadoop_hdfs_jar_path
+    hadoop_hdfs_jar_path = os.path.join(current_dir, "..", "phoenix-assembly", "target","*")
+
+    global hadoop_hdfs_jar
+    hadoop_hdfs_jar = find("hadoop-hdfs*.jar", hadoop_hdfs_jar_path)
+
+    global testjar
+    testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_test_jar_path)
+    if testjar == "":
+        testjar = findFileInPathWithoutRecursion(PHOENIX_TESTS_JAR_PATTERN, os.path.join(current_dir, ".."))
+    if testjar == "":
+        testjar = find(PHOENIX_TESTS_JAR_PATTERN, phoenix_class_path)
+
+    global phoenix_queryserver_jar
+    phoenix_queryserver_jar = find(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-server", "target", "*"))
+    if phoenix_queryserver_jar == "":
+        phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, "..", "lib"))
+    if phoenix_queryserver_jar == "":
+        phoenix_queryserver_jar = findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+    global phoenix_thin_client_jar
+    phoenix_thin_client_jar = find(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, "..", "phoenix-server-client", "target", "*"))
+    if phoenix_thin_client_jar == "":
+        phoenix_thin_client_jar = findFileInPathWithoutRecursion(PHOENIX_THIN_CLIENT_JAR_PATTERN, os.path.join(current_dir, ".."))
+
+    return ""
 
 def shell_quote(args):
     """
@@ -124,3 +133,20 @@ def shell_quote(args):
         # pipes module isn't available on Windows
         import pipes
         return " ".join([pipes.quote(v) for v in args])
+
+if __name__ == "__main__":
+    setPath()
+    print "phoenix_class_path:", phoenix_class_path
+    print "hbase_conf_dir:", hbase_conf_dir
+    print "hbase_conf_path:", hbase_conf_path
+    print "current_dir:", current_dir
+    print "phoenix_jar_path:", phoenix_jar_path
+    print "phoenix_client_jar:", phoenix_client_jar
+    print "phoenix_test_jar_path:", phoenix_test_jar_path
+    print "hadoop_common_jar_path:", hadoop_common_jar_path
+    print "hadoop_common_jar:", hadoop_common_jar
+    print "hadoop_hdfs_jar_path:", hadoop_hdfs_jar_path
+    print "hadoop_hdfs_jar:", hadoop_hdfs_jar
+    print "testjar:", testjar
+    print "phoenix_queryserver_jar:", phoenix_queryserver_jar
+    print "phoenix_thin_client_jar:", phoenix_thin_client_jar


[15/50] [abbrv] phoenix git commit: PHOENIX-1945 Phoenix tarball from assembly does not contain phoenix-[version]-server.jar

Posted by ma...@apache.org.
PHOENIX-1945 Phoenix tarball from assembly does not contain phoenix-[version]-server.jar


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c1e5c71a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c1e5c71a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c1e5c71a

Branch: refs/heads/calcite
Commit: c1e5c71abb84f0b2dcb3e1384e21a3f5a70a4d1a
Parents: b5ef25c
Author: Enis Soztutar <en...@apache.org>
Authored: Wed May 13 11:11:17 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Wed May 13 11:11:17 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c1e5c71a/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 5a73e7a..51ff74d 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -65,7 +65,7 @@
             </configuration>
           </execution>
           <execution>
-            <id>package-to-tar</id>
+            <id>client-minimal</id>
             <phase>package</phase>
             <goals>
               <goal>single</goal>
@@ -73,51 +73,51 @@
             <configuration>
             <finalName>phoenix-${project.version}</finalName>
               <attach>false</attach>
-              <tarLongFileMode>gnu</tarLongFileMode>
-              <appendAssemblyId>false</appendAssemblyId>
+              <appendAssemblyId>true</appendAssemblyId>
               <descriptors>
-                <descriptor>src/build/package-to-tar-all.xml</descriptor>
+               <!--build the phoenix client jar, but without HBase code. -->
+                <descriptor>src/build/client-without-hbase.xml</descriptor>
+               <!-- build the phoenix client jar, but without HBase (or its depenencies). -->
+                <descriptor>src/build/client-minimal.xml</descriptor>
+               <!-- build the phoenix server side jar, that includes phoenix-hadoopX-compat, phoenix-hadoop-compat and antlr -->
+                <descriptor>src/build/server.xml</descriptor>
+               <!-- build the phoenix server side jar, that includes phoenix-hadoopX-compat and phoenix-hadoop-compat. -->
+                <descriptor>src/build/server-without-antlr.xml</descriptor>
               </descriptors>
-              <tarLongFileMode>posix</tarLongFileMode>
             </configuration>
           </execution>
           <execution>
-            <id>package-to-source-tar</id>
+            <id>package-to-tar</id>
             <phase>package</phase>
             <goals>
               <goal>single</goal>
             </goals>
             <configuration>
-            <finalName>phoenix-${project.version}-source</finalName>
+            <finalName>phoenix-${project.version}</finalName>
               <attach>false</attach>
               <tarLongFileMode>gnu</tarLongFileMode>
               <appendAssemblyId>false</appendAssemblyId>
               <descriptors>
-                <descriptor>src/build/src.xml</descriptor>
+                <descriptor>src/build/package-to-tar-all.xml</descriptor>
               </descriptors>
               <tarLongFileMode>posix</tarLongFileMode>
             </configuration>
-          </execution>          
+          </execution>
           <execution>
-            <id>client-minimal</id>
+            <id>package-to-source-tar</id>
             <phase>package</phase>
             <goals>
               <goal>single</goal>
             </goals>
             <configuration>
-            <finalName>phoenix-${project.version}</finalName>
+            <finalName>phoenix-${project.version}-source</finalName>
               <attach>false</attach>
-              <appendAssemblyId>true</appendAssemblyId>
+              <tarLongFileMode>gnu</tarLongFileMode>
+              <appendAssemblyId>false</appendAssemblyId>
               <descriptors>
-               <!--build the phoenix client jar, but without HBase code. -->
-                <descriptor>src/build/client-without-hbase.xml</descriptor>
-               <!-- build the phoenix client jar, but without HBase (or its depenencies). -->
-                <descriptor>src/build/client-minimal.xml</descriptor>
-               <!-- build the phoenix server side jar, that includes phoenix-hadoopX-compat, phoenix-hadoop-compat and antlr -->
-                <descriptor>src/build/server.xml</descriptor>
-               <!-- build the phoenix server side jar, that includes phoenix-hadoopX-compat and phoenix-hadoop-compat. -->
-                <descriptor>src/build/server-without-antlr.xml</descriptor>
+                <descriptor>src/build/src.xml</descriptor>
               </descriptors>
+              <tarLongFileMode>posix</tarLongFileMode>
             </configuration>
           </execution>
         </executions>


[29/50] [abbrv] phoenix git commit: PHOENIX-2008 Integration tests are failing with HBase-1.1.0 because of HBASE-13756 (Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-2008 Integration tests are failing with HBase-1.1.0 because of HBASE-13756 (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a28c1d3b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a28c1d3b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a28c1d3b

Branch: refs/heads/calcite
Commit: a28c1d3b2d31377f70e0a4c661c3c70d8bc99216
Parents: edff624
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Sat May 23 23:27:27 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Sat May 23 23:27:27 2015 +0530

----------------------------------------------------------------------
 phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a28c1d3b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 54ae670..4aa28c4 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -620,6 +620,8 @@ public abstract class BaseTest {
         }
         //no point doing sanity checks when running tests.
         conf.setBoolean("hbase.table.sanity.checks", false);
+        // Remove this configuration once hbase has HBASE-13756 fix.
+        conf.set("hbase.regionserver.msginterval", "300000");
         // set the server rpc controller and rpc scheduler factory, used to configure the cluster
         conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_SERVER_RPC_CONTROLLER_FACTORY);
         conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, DEFAULT_RPC_SCHEDULER_FACTORY);
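The same workaround can be applied when standing up a mini-cluster outside of BaseTest. A minimal sketch, assuming only the standard HBaseTestingUtility API (the class below is illustrative, not Phoenix code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterWithWorkaround {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Stretch the region server message interval so the HBASE-13756
        // behavior does not destabilize the mini-cluster during tests.
        conf.set("hbase.regionserver.msginterval", "300000");
        HBaseTestingUtility util = new HBaseTestingUtility(conf);
        util.startMiniCluster();
        try {
            // run integration tests against util.getConnection() here
        } finally {
            util.shutdownMiniCluster();
        }
    }
}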


[39/50] [abbrv] phoenix git commit: PHOENIX-2012 RowKeyComparisonFilter logs unencoded data at DEBUG level

Posted by ma...@apache.org.
PHOENIX-2012 RowKeyComparisonFilter logs unencoded data at DEBUG level


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9e686b75
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9e686b75
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9e686b75

Branch: refs/heads/calcite
Commit: 9e686b758ff735fd9129430cd31fe36993b9711b
Parents: dc3083f
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed May 27 15:58:32 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Mon Jun 1 15:54:37 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/filter/RowKeyComparisonFilter.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9e686b75/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
index 2e2037b..b7de7ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
@@ -73,8 +73,9 @@ public class RowKeyComparisonFilter extends BooleanExpressionFilter {
         if (evaluate) {
             inputTuple.setKey(v.getRowArray(), v.getRowOffset(), v.getRowLength());
             this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple));
-            if (logger.isDebugEnabled()) {
-                logger.debug("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")  + " row " + inputTuple);
+            if (logger.isTraceEnabled()) {
+                logger.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
+                        + " row " + inputTuple);
             }
             evaluate = false;
         }
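The change also illustrates a general logging rule: values that may contain user data (here, unencoded row keys) belong at TRACE, behind a guard so the message is never built at lower levels. A minimal sketch of the guarded form, assuming an SLF4J-style logger (illustrative, not the Phoenix class itself):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardedTraceLogging {
    private static final Logger logger =
            LoggerFactory.getLogger(GuardedTraceLogging.class);

    void logDecision(boolean keepRow, Object inputTuple) {
        // The guard skips the string concatenation (and avoids exposing
        // row contents) unless TRACE is explicitly enabled.
        if (logger.isTraceEnabled()) {
            logger.trace("RowKeyComparisonFilter: " + (keepRow ? "KEEP" : "FILTER")
                    + " row " + inputTuple);
        }
    }
}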


[18/50] [abbrv] phoenix git commit: PHOENIX-1976 Exit gracefully if addShutdownHook fails.

Posted by ma...@apache.org.
PHOENIX-1976 Exit gracefully if addShutdownHook fails.

If the JVM is already in the process of shutting down,
we don't need to add the shutdown hook for the PhoenixDriver
instance. Additionally, we shouldn't advertise this instance
either since we're going down.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/23f5acf8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/23f5acf8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/23f5acf8

Branch: refs/heads/calcite
Commit: 23f5acf86e1065f6bc8c342df4ba29f18aafea8a
Parents: 289a875
Author: Josh Elser <jo...@gmail.com>
Authored: Thu May 14 17:40:46 2015 -0400
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Fri May 15 11:05:05 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  | 46 ++++++++++++++------
 1 file changed, 32 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/23f5acf8/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 6360d06..cfabe82 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -60,25 +60,43 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
     private static volatile String driverShutdownMsg;
     static {
         try {
-            DriverManager.registerDriver( INSTANCE = new PhoenixDriver() );
-            // Add shutdown hook to release any resources that were never closed
-            // In theory not necessary, but it won't hurt anything
-            Runtime.getRuntime().addShutdownHook(new Thread() {
-                @Override
-                public void run() {
-                    try {
-                        INSTANCE.close();
-                    } catch (SQLException e) {
-                        logger.warn("Unable to close PhoenixDriver on shutdown", e);
-                    } finally {
-                        driverShutdownMsg = "Phoenix driver closed because server is shutting down";
+            INSTANCE = new PhoenixDriver();
+            try {
+                // Add shutdown hook to release any resources that were never closed
+                // In theory not necessary, but it won't hurt anything
+                Runtime.getRuntime().addShutdownHook(new Thread() {
+                    @Override
+                    public void run() {
+                        closeInstance(INSTANCE);
                     }
-                }
-            });
+                });
+
+                // Only register the driver when we successfully register the shutdown hook
+                // Don't want to register it if we're already in the process of going down.
+                DriverManager.registerDriver( INSTANCE );
+            } catch (IllegalStateException e) {
+                logger.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
+
+                // Close the instance now because we don't have the shutdown hook
+                closeInstance(INSTANCE);
+
+                throw e;
+            }
         } catch (SQLException e) {
             throw new IllegalStateException("Unable to register " + PhoenixDriver.class.getName() + ": "+ e.getMessage());
         }
     }
+
+    private static void closeInstance(PhoenixDriver instance) {
+        try {
+            instance.close();
+        } catch (SQLException e) {
+            logger.warn("Unable to close PhoenixDriver on shutdown", e);
+        } finally {
+            driverShutdownMsg = "Phoenix driver closed because server is shutting down";
+        }
+    }
+
     // One entry per cluster here
     private final ConcurrentMap<ConnectionInfo,ConnectionQueryServices> connectionQueryServicesMap = new ConcurrentHashMap<ConnectionInfo,ConnectionQueryServices>(3);
 


[36/50] [abbrv] phoenix git commit: PHOENIX-2022 Make BaseRegionScanner.next abstract

Posted by ma...@apache.org.
PHOENIX-2022 Make BaseRegionScanner.next abstract

Avoid infinite recursion by removing a self-recursive call
from BaseRegionScanner.next(List<Cell>), a method that
subclasses already treated as abstract in practice.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/583b5b1e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/583b5b1e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/583b5b1e

Branch: refs/heads/calcite
Commit: 583b5b1e115a81799cc3e6d0a20a0fe665f666e3
Parents: b7f1382
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Mon Jun 1 08:57:22 2015 +0200
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Mon Jun 1 08:57:22 2015 +0200

----------------------------------------------------------------------
 .../java/org/apache/phoenix/coprocessor/BaseRegionScanner.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/583b5b1e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
index 828f776..3f73048 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
@@ -33,9 +33,7 @@ public abstract class BaseRegionScanner implements RegionScanner {
     }
 
     @Override
-    public boolean next(List<Cell> results) throws IOException {
-        return next(results);
-    }
+    public abstract boolean next(List<Cell> results) throws IOException;
 
     @Override
     public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {


[26/50] [abbrv] phoenix git commit: PHOENIX-1763 Support building with HBase-1.1.0

Posted by ma...@apache.org.
PHOENIX-1763 Support building with HBase-1.1.0


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7bc9cce1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7bc9cce1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7bc9cce1

Branch: refs/heads/calcite
Commit: 7bc9cce172b2b1cebd00275a0f2c586944709231
Parents: 286ff26
Author: Enis Soztutar <en...@apache.org>
Authored: Thu May 21 23:08:26 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Thu May 21 23:33:48 2015 -0700

----------------------------------------------------------------------
 phoenix-core/pom.xml                            | 17 +++--
 .../regionserver/IndexHalfStoreFileReader.java  | 31 ++++++--
 .../regionserver/IndexSplitTransaction.java     | 39 ++++++++--
 .../hbase/regionserver/LocalIndexMerger.java    |  3 +-
 .../cache/aggcache/SpillableGroupByCache.java   | 13 +++-
 .../phoenix/coprocessor/BaseRegionScanner.java  | 12 +--
 .../coprocessor/BaseScannerRegionObserver.java  | 77 +++++++++++---------
 .../coprocessor/DelegateRegionScanner.java      | 23 ++++--
 .../GroupedAggregateRegionObserver.java         | 53 ++++++++------
 .../coprocessor/HashJoinRegionScanner.java      | 60 ++++++++-------
 .../coprocessor/MetaDataRegionObserver.java     | 23 +++---
 .../phoenix/coprocessor/ScanRegionObserver.java | 11 ++-
 .../UngroupedAggregateRegionObserver.java       | 55 +++++++-------
 .../hbase/index/covered/data/LocalTable.java    |  2 +-
 .../index/scanner/FilteredKeyValueScanner.java  |  2 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |  6 +-
 .../iterate/RegionScannerResultIterator.java    |  9 ++-
 .../phoenix/schema/stats/StatisticsScanner.java | 10 ++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  6 +-
 .../index/covered/TestLocalTableState.java      |  1 -
 .../index/write/TestWALRecoveryCaching.java     |  4 +-
 phoenix-flume/pom.xml                           |  9 ---
 phoenix-pig/pom.xml                             | 31 +++++---
 phoenix-spark/pom.xml                           |  7 ++
 pom.xml                                         | 41 ++++++++++-
 25 files changed, 352 insertions(+), 193 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 65e4f8e..9ab2a0e 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -354,16 +354,25 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
     </dependency>
     <dependency>
@@ -373,18 +382,16 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
@@ -395,13 +402,11 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 49e2022..9befc8c 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -47,11 +47,11 @@ import org.apache.phoenix.index.IndexMaintainer;
  * that sort lowest and 'top' is the second half of the file with keys that sort greater than those
  * of the bottom half. The top includes the split file's midkey, or the key that follows if it does
  * not exist in the file.
- * 
+ *
  * <p>
  * This type works in tandem with the {@link Reference} type. This class is used reading while
  * Reference is used writing.
- * 
+ *
  * <p>
  * This file is not splittable. Calls to {@link #midkey()} return null.
  */
@@ -64,7 +64,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     private final byte[] splitkey;
     private final byte[] splitRow;
     private final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers;
-    private final byte[][] viewConstants; 
+    private final byte[][] viewConstants;
     private final int offset;
     private final HRegionInfo regionInfo;
     private final byte[] regionStartKeyInHFile;
@@ -144,6 +144,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
             final HFileScanner delegate = s;
             public boolean atEnd = false;
 
+            @Override
             public ByteBuffer getKey() {
                 if (atEnd) {
                     return null;
@@ -160,7 +161,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 // If it is top store file replace the StartKey of the Key with SplitKey
                 return getChangedKey(delegate.getKeyValue(), changeBottomKeys);
             }
-            
+
             private ByteBuffer getChangedKey(Cell kv, boolean changeBottomKeys) {
                 // new KeyValue(row, family, qualifier, timestamp, type, value)
                 byte[] newRowkey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(kv, changeBottomKeys);
@@ -183,6 +184,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return keyReplacedStartKey;
             }
 
+            @Override
             public String getKeyString() {
                 if (atEnd) {
                     return null;
@@ -190,6 +192,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return Bytes.toStringBinary(getKey());
             }
 
+            @Override
             public ByteBuffer getValue() {
                 if (atEnd) {
                     return null;
@@ -197,6 +200,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return delegate.getValue();
             }
 
+            @Override
             public String getValueString() {
                 if (atEnd) {
                     return null;
@@ -204,6 +208,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return Bytes.toStringBinary(getValue());
             }
 
+            @Override
             public Cell getKeyValue() {
                 if (atEnd) {
                     return null;
@@ -227,6 +232,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return changedKv;
             }
 
+            @Override
             public boolean next() throws IOException {
                 if (atEnd) {
                     return false;
@@ -248,10 +254,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 }
             }
 
+            @Override
             public boolean seekBefore(byte[] key) throws IOException {
                 return seekBefore(key, 0, key.length);
             }
 
+            @Override
             public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
 
                 if (top) {
@@ -282,6 +290,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return seekBefore(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
             }
 
+            @Override
             public boolean seekTo() throws IOException {
                 boolean b = delegate.seekTo();
                 if (!b) {
@@ -302,10 +311,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 }
             }
 
+            @Override
             public int seekTo(byte[] key) throws IOException {
                 return seekTo(key, 0, key.length);
             }
 
+            @Override
             public int seekTo(byte[] key, int offset, int length) throws IOException {
                 if (top) {
                     if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
@@ -342,10 +353,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return seekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
             }
 
+            @Override
             public int reseekTo(byte[] key) throws IOException {
                 return reseekTo(key, 0, key.length);
             }
 
+            @Override
             public int reseekTo(byte[] key, int offset, int length) throws IOException {
                 if (top) {
                     if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
@@ -375,11 +388,13 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                 return reseekTo(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength());
             }
 
+            @Override
             public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
                 return this.delegate.getReader();
             }
 
             // TODO: Need to change as per IndexHalfStoreFileReader
+            @Override
             public boolean isSeeked() {
                 return this.delegate.isSeeked();
             }
@@ -425,13 +440,13 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     /**
      * In case of top half store, the passed key will be with the start key of the daughter region.
      * But in the actual HFiles, the key will be with the start key of the old parent region. In
-     * order to make the real seek in the HFiles, we need to build the old key. 
-     * 
+     * order to make the real seek in the HFiles, we need to build the old key.
+     *
      * The logic here is just replace daughter region start key with parent region start key
      * in the key part.
-     * 
+     *
      * @param key
-     * 
+     *
      */
     private KeyValue getKeyPresentInHFiles(byte[] key) {
         KeyValue keyValue = new KeyValue(key);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
index 920380b..3057a14 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
@@ -165,6 +165,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @return <code>true</code> if the region is splittable else
    * <code>false</code> if it is not (e.g. its already closed, etc.).
    */
+  @Override
   public boolean prepare() {
     if (!this.parent.isSplittable()) return false;
     // Split key can be null if this region is unsplittable; i.e. has refs.
@@ -215,6 +216,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    *    Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    */
+  @Override
   /* package */PairOfSameType<HRegion> createDaughters(final Server server,
       final RegionServerServices services) throws IOException {
     LOG.info("Starting split of region " + this.parent);
@@ -288,16 +290,19 @@ public class IndexSplitTransaction extends SplitTransaction {
       if (metaEntries == null || metaEntries.isEmpty()) {
         MetaTableAccessor.splitRegion(server.getConnection(), parent.getRegionInfo(),
                 daughterRegions.getFirst().getRegionInfo(),
-                daughterRegions.getSecond().getRegionInfo(), server.getServerName());
+                daughterRegions.getSecond().getRegionInfo(), server.getServerName(),
+                parent.getTableDesc().getRegionReplication());
       } else {
         offlineParentInMetaAndputMetaEntries(server.getConnection(),
           parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
-              .getSecond().getRegionInfo(), server.getServerName(), metaEntries);
+              .getSecond().getRegionInfo(), server.getServerName(), metaEntries,
+              parent.getTableDesc().getRegionReplication());
       }
     }
     return daughterRegions;
   }
 
+  @Override
   public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
       final RegionServerServices services, boolean testing) throws IOException {
     // Set ephemeral SPLITTING znode up in zk.  Mocked servers sometimes don't
@@ -380,6 +385,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @throws IOException If thrown, transaction failed.
    *          Call {@link #rollback(Server, RegionServerServices)}
    */
+  @Override
   /* package */void openDaughters(final Server server,
       final RegionServerServices services, HRegion a, HRegion b)
       throws IOException {
@@ -565,6 +571,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @throws IOException
    * @see #rollback(Server, RegionServerServices)
    */
+  @Override
   public PairOfSameType<HRegion> execute(final Server server,
       final RegionServerServices services)
   throws IOException {
@@ -575,6 +582,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     return stepsAfterPONR(server, services, regions);
   }
 
+  @Override
   public PairOfSameType<HRegion> stepsAfterPONR(final Server server,
       final RegionServerServices services, PairOfSameType<HRegion> regions)
       throws IOException {
@@ -585,7 +593,7 @@ public class IndexSplitTransaction extends SplitTransaction {
 
   private void offlineParentInMetaAndputMetaEntries(Connection conn,
       HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
-      ServerName serverName, List<Mutation> metaEntries) throws IOException {
+      ServerName serverName, List<Mutation> metaEntries, int regionReplication) throws IOException {
     List<Mutation> mutations = metaEntries;
     HRegionInfo copyOfParent = new HRegionInfo(parent);
     copyOfParent.setOffline(true);
@@ -595,7 +603,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
     MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
     mutations.add(putParent);
-    
+
     //Puts for daughters
     Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
     Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
@@ -604,9 +612,18 @@ public class IndexSplitTransaction extends SplitTransaction {
     addLocation(putB, serverName, 1);
     mutations.add(putA);
     mutations.add(putB);
+
+    // Add empty locations for region replicas of daughters so that number of replicas can be
+    // cached whenever the primary region is looked up from meta
+    for (int i = 1; i < regionReplication; i++) {
+      addEmptyLocation(putA, i);
+      addEmptyLocation(putB, i);
+    }
+
     MetaTableAccessor.mutateMetaTable(conn, mutations);
   }
 
+  @Override
   public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
       Bytes.toBytes(sn.getHostAndPort()));
@@ -617,6 +634,13 @@ public class IndexSplitTransaction extends SplitTransaction {
     return p;
   }
 
+  private static Put addEmptyLocation(final Put p, int replicaId){
+    p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId), null);
+    p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId), null);
+    p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId), null);
+    return p;
+  }
+
   /*
    * Open daughter region in its own thread.
    * If we fail, abort this hosting server.
@@ -659,6 +683,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @throws IOException
    * @throws KeeperException
    */
+  @Override
   void openDaughterRegion(final Server server, final HRegion daughter)
   throws IOException, KeeperException {
     HRegionInfo hri = daughter.getRegionInfo();
@@ -767,6 +792,7 @@ public class IndexSplitTransaction extends SplitTransaction {
       this.family = family;
     }
 
+    @Override
     public Void call() throws IOException {
       splitStoreFile(family, sf);
       return null;
@@ -807,6 +833,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @return True if we successfully rolled back, false if we got to the point
    * of no return and so now need to abort the server to minimize damage.
    */
+  @Override
   @SuppressWarnings("deprecation")
   public boolean rollback(final Server server, final RegionServerServices services)
   throws IOException {
@@ -879,10 +906,12 @@ public class IndexSplitTransaction extends SplitTransaction {
     return result;
   }
 
+  @Override
   HRegionInfo getFirstDaughter() {
     return hri_a;
   }
 
+  @Override
   HRegionInfo getSecondDaughter() {
     return hri_b;
   }
@@ -971,7 +1000,7 @@ public class IndexSplitTransaction extends SplitTransaction {
     return ZKAssign.transitionNode(zkw, parent, serverName,
       beginState, endState, znodeVersion, payload);
   }
-  
+
   public HRegion getParent() {
     return this.parent;
   }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
index f074df7..add9b72 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -81,7 +81,8 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
                 this.mergedRegion = rmt.stepsBeforePONR(rss, rss, false);
                 rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(),
                     indexRegionA.getRegionInfo(), indexRegionB.getRegionInfo(),
-                    rss.getServerName(), metaEntries);
+                    rss.getServerName(), metaEntries,
+                    mergedRegion.getTableDesc().getRegionReplication());
             } catch (Exception e) {
                 ctx.bypass();
                 LOG.warn("index regions merge failed with the exception ", e);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index ce18cc2..69fc6f6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -120,7 +120,7 @@ public class SpillableGroupByCache implements GroupByCache {
 
     /**
      * Instantiates a Loading LRU Cache that stores key / aggregator[] tuples used for group by queries
-     * 
+     *
      * @param estSize
      * @param estValueSize
      * @param aggs
@@ -325,7 +325,7 @@ public class SpillableGroupByCache implements GroupByCache {
 
     /**
      * Closes cache and releases spill resources
-     * 
+     *
      * @throws IOException
      */
     @Override
@@ -358,7 +358,9 @@ public class SpillableGroupByCache implements GroupByCache {
 
             @Override
             public boolean next(List<Cell> results) throws IOException {
-                if (!cacheIter.hasNext()) { return false; }
+                if (!cacheIter.hasNext()) {
+                    return false;
+                }
                 Map.Entry<ImmutableBytesWritable, Aggregator[]> ce = cacheIter.next();
                 ImmutableBytesWritable key = ce.getKey();
                 Aggregator[] aggs = ce.getValue();
@@ -377,6 +379,11 @@ public class SpillableGroupByCache implements GroupByCache {
             public long getMaxResultSize() {
               return s.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+                return s.getBatch();
+            }
         };
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
index ff9ac76..828f776 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
@@ -22,14 +22,14 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 
 public abstract class BaseRegionScanner implements RegionScanner {
 
     @Override
     public boolean isFilterDone() {
-        return false; 
+        return false;
     }
 
     @Override
@@ -38,10 +38,10 @@ public abstract class BaseRegionScanner implements RegionScanner {
     }
 
     @Override
-    public boolean next(List<Cell> result, int limit) throws IOException {
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
         return next(result);
     }
-    
+
     @Override
     public boolean reseek(byte[] row) throws IOException {
         throw new DoNotRetryIOException("Unsupported");
@@ -58,7 +58,7 @@ public abstract class BaseRegionScanner implements RegionScanner {
     }
 
     @Override
-    public boolean nextRaw(List<Cell> result, int limit) throws IOException {
-        return next(result, limit);
+    public boolean nextRaw(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        return next(result, scannerContext);
     }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index a2269b4..fc74968 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -60,7 +61,7 @@ import com.google.common.collect.ImmutableList;
 
 
 abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
-    
+
     public static final String AGGREGATORS = "_Aggs";
     public static final String UNORDERED_GROUP_BY_EXPRESSIONS = "_UnorderedGroupByExpressions";
     public static final String KEY_ORDERED_GROUP_BY_EXPRESSIONS = "_OrderedGroupByExpressions";
@@ -91,7 +92,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
      * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom annotations
      * are used to augment log lines emitted by Phoenix. See https://issues.apache.org/jira/browse/PHOENIX-1198.
      */
-    public static final String CUSTOM_ANNOTATIONS = "_Annot"; 
+    public static final String CUSTOM_ANNOTATIONS = "_Annot";
 
     /** Exposed for testing */
     public static final String SCANNER_OPENED_TRACE_INFO = "Scanner opened on server";
@@ -111,8 +112,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     public String toString() {
         return this.getClass().getName();
     }
-    
-    
+
+
     private static void throwIfScanOutOfRegion(Scan scan, HRegion region) throws DoNotRetryIOException {
         boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
         byte[] lowerInclusiveScanKey = scan.getStartRow();
@@ -136,7 +137,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
 
     abstract protected boolean isRegionObserverFor(Scan scan);
     abstract protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws Throwable;
-    
+
     @Override
     public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
         final Scan scan, final RegionScanner s) throws IOException {
@@ -153,7 +154,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     /**
      * Wrapper for {@link #postScannerOpen(ObserverContext, Scan, RegionScanner)} that ensures no non IOException is thrown,
      * to prevent the coprocessor from becoming blacklisted.
-     * 
+     *
      */
     @Override
     public final RegionScanner postScannerOpen(
@@ -165,10 +166,10 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             }
             boolean success =false;
             // Save the current span. When done with the child span, reset the span back to
-            // what it was. Otherwise, this causes the thread local storing the current span 
+            // what it was. Otherwise, this causes the thread local storing the current span
             // to not be reset back to null causing catastrophic infinite loops
             // and region servers to crash. See https://issues.apache.org/jira/browse/PHOENIX-1596
-            // TraceScope can't be used here because closing the scope will end up calling 
+            // TraceScope can't be used here because closing the scope will end up calling
             // currentSpan.stop() and that should happen only when we are closing the scanner.
             final Span savedSpan = Trace.currentSpan();
             final Span child = Trace.startSpan(SCANNER_OPENED_TRACE_INFO, savedSpan).getSpan();
@@ -226,7 +227,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
         return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
                 dataRegion, indexMaintainer, viewConstants, null, null, projector, ptr);
     }
-    
+
     /**
      * Return wrapped scanner that catches unexpected exceptions (i.e. Phoenix bugs) and
      * re-throws as DoNotRetryIOException to prevent needless retrying hanging the query
@@ -246,7 +247,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final Expression[] arrayFuncRefs, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
             final HRegion dataRegion, final IndexMaintainer indexMaintainer,
-            final byte[][] viewConstants, final KeyValueSchema kvSchema, 
+            final byte[][] viewConstants, final KeyValueSchema kvSchema,
             final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
         return new RegionScanner() {
@@ -262,9 +263,9 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             }
 
             @Override
-            public boolean next(List<Cell> result, int limit) throws IOException {
+            public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
                 try {
-                    return s.next(result, limit);
+                    return s.next(result, scannerContext);
                 } catch (Throwable t) {
                     ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
                     return false; // impossible
@@ -324,30 +325,31 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             }
 
             @Override
-            public boolean nextRaw(List<Cell> result, int limit) throws IOException {
-                try {
-                    boolean next = s.nextRaw(result, limit);
-                    if (result.size() == 0) {
-                        return next;
-                    }
-                    if (arrayFuncRefs != null && arrayFuncRefs.length > 0 && arrayKVRefs.size() > 0) {
-                        replaceArrayIndexElement(arrayKVRefs, arrayFuncRefs, result);
-                    }
-                    if ((offset > 0 || ScanUtil.isLocalIndex(scan))  && !ScanUtil.isAnalyzeTable(scan)) {
-                        IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
-                            tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
-                    }
-                    if (projector != null) {
-                        Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
-                        result.clear();
-                        result.add(tuple.getValue(0));
-                    }
-                    // There is a scanattribute set to retrieve the specific array element
+            public boolean nextRaw(List<Cell> result, ScannerContext scannerContext)
+                throws IOException {
+              try {
+                boolean next = s.nextRaw(result, scannerContext);
+                if (result.size() == 0) {
                     return next;
-                } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
-                    return false; // impossible
                 }
+                if (arrayFuncRefs != null && arrayFuncRefs.length > 0 && arrayKVRefs.size() > 0) {
+                    replaceArrayIndexElement(arrayKVRefs, arrayFuncRefs, result);
+                }
+                if ((offset > 0 || ScanUtil.isLocalIndex(scan))  && !ScanUtil.isAnalyzeTable(scan)) {
+                    IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
+                        tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
+                }
+                if (projector != null) {
+                    Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
+                    result.clear();
+                    result.add(tuple.getValue(0));
+                }
+                // There is a scan attribute set to retrieve the specific array element
+                return next;
+            } catch (Throwable t) {
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                return false; // impossible
+            }
             }
 
             private void replaceArrayIndexElement(final Set<KeyValueColumnExpression> arrayKVRefs,
@@ -387,6 +389,11 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             public long getMaxResultSize() {
                 return s.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+                return s.getBatch();
+            }
         };
     }
-}
\ No newline at end of file
+}
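The javadoc on getWrappedScanner above spells out a convention worth noting: unexpected server-side Throwables are rethrown as DoNotRetryIOException so the client aborts the query rather than retrying a doomed scan. A minimal sketch of that conversion, assuming only the standard DoNotRetryIOException API (the helper is illustrative, not Phoenix's ServerUtil):

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;

final class FailFast {
    // Convert an arbitrary failure into an exception the HBase client
    // will not retry; plain IOExceptions pass through unchanged.
    static void rethrow(String regionName, Throwable t) throws IOException {
        if (t instanceof IOException) {
            throw (IOException) t;
        }
        throw new DoNotRetryIOException(regionName + ": " + t, t);
    }
}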

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
index f88a931..43c35a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 
 public class DelegateRegionScanner implements RegionScanner {
 
@@ -56,23 +57,33 @@ public class DelegateRegionScanner implements RegionScanner {
         delegate.close();
     }
 
+    @Override
     public long getMaxResultSize() {
         return delegate.getMaxResultSize();
     }
 
-    public boolean next(List<Cell> arg0, int arg1) throws IOException {
-        return delegate.next(arg0, arg1);
+    @Override
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        return delegate.next(result, scannerContext);
     }
 
-    public boolean next(List<Cell> arg0) throws IOException {
-        return delegate.next(arg0);
+    @Override
+    public boolean next(List<Cell> result) throws IOException {
+        return delegate.next(result);
     }
 
-    public boolean nextRaw(List<Cell> arg0, int arg1) throws IOException {
-        return delegate.nextRaw(arg0, arg1);
+    @Override
+    public boolean nextRaw(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        return delegate.nextRaw(result, scannerContext);
     }
 
+    @Override
     public boolean nextRaw(List<Cell> arg0) throws IOException {
         return delegate.nextRaw(arg0);
     }
+
+    @Override
+    public int getBatch() {
+        return delegate.getBatch();
+    }
 }
\ No newline at end of file
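DelegateRegionScanner shows the general shape this port converges on: every RegionScanner method simply forwards to a wrapped delegate, and moving to HBase 1.1 means adding the ScannerContext overloads and getBatch() to that forwarding set. A compact sketch of the pattern (declared abstract so the example stays short; a complete delegate forwards every interface method the same way):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;

abstract class ForwardingRegionScanner implements RegionScanner {
    private final RegionScanner delegate;

    ForwardingRegionScanner(RegionScanner delegate) {
        this.delegate = delegate;
    }

    @Override
    public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
        // HBase 1.1 replaced the old "int limit" overloads with ScannerContext.
        return delegate.next(results, scannerContext);
    }

    @Override
    public int getBatch() {
        // New in the HBase 1.1 RegionScanner contract.
        return delegate.getBatch();
    }
}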

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 1f1ba36..19a1663 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -80,7 +80,7 @@ import com.google.common.collect.Maps;
 
 /**
  * Region observer that aggregates grouped rows (i.e. SQL query with GROUP BY clause)
- * 
+ *
  * @since 0.1
  */
 public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
@@ -116,7 +116,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
-        
+
         List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
         ServerAggregators aggregators =
                 ServerAggregators.deserialize(scan
@@ -124,7 +124,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                         .getEnvironment().getConfiguration());
 
         RegionScanner innerScanner = s;
-        
+
         byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
         List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
         TupleProjector tupleProjector = null;
@@ -142,9 +142,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             }
             ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             innerScanner =
-                    getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, 
+                    getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector,
                             dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
-        } 
+        }
 
         if (j != null) {
             innerScanner =
@@ -223,13 +223,13 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
     }
 
     /**
-     * 
+     *
      * Cache for distinct values and their aggregations which is completely
      * in-memory (as opposed to spilling to disk). Used when GROUPBY_SPILLABLE_ATTRIB
     * is set to false. The memory usage is tracked at a coarse grain and will
     * throw and abort the query if too much memory is used.
      *
-     * 
+     *
      * @since 3.0.0
      */
     private static final class InMemoryGroupByCache implements GroupByCache {
@@ -238,9 +238,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         private final ServerAggregators aggregators;
         private final RegionCoprocessorEnvironment env;
         private final byte[] customAnnotations;
-        
+
         private int estDistVals;
-        
+
         InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
             int estValueSize = aggregators.getEstimatedByteSize();
             long estSize = sizeOfUnorderedGroupByMap(estDistVals, estValueSize);
@@ -252,7 +252,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             this.chunk = tenantCache.getMemoryManager().allocate(estSize);
             this.customAnnotations = customAnnotations;
         }
-        
+
         @Override
         public void close() throws IOException {
             this.chunk.close();
@@ -291,7 +291,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
             chunk.resize(estSize);
 
             final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
-            
+
             final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
                     aggregateMap.entrySet().iterator();
             while (cacheIter.hasNext()) {
@@ -333,7 +333,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
 
                 @Override
                 public boolean next(List<Cell> results) throws IOException {
-                    if (index >= aggResults.size()) return false;
+                    if (index >= aggResults.size()) {
+                        return false;
+                    }
                     results.add(aggResults.get(index));
                     index++;
                     return index < aggResults.size();
@@ -343,6 +345,11 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 public long getMaxResultSize() {
                 	return s.getMaxResultSize();
                 }
+
+                @Override
+                public int getBatch() {
+                    return s.getBatch();
+                }
             };
         }
 
@@ -350,22 +357,22 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         public long size() {
             return aggregateMap.size();
         }
-        
+
     }
     private static final class GroupByCacheFactory {
         public static final GroupByCacheFactory INSTANCE = new GroupByCacheFactory();
-        
+
         private GroupByCacheFactory() {
         }
-        
+
         GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
             Configuration conf = env.getConfiguration();
             boolean spillableEnabled =
                     conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
             if (spillableEnabled) {
                 return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals);
-            } 
-            
+            }
+
             return new InMemoryGroupByCache(env, tenantId, customAnnotations, aggregators, estDistVals);
         }
     }
@@ -388,14 +395,14 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
         if (estDistValsBytes != null) {
             // Allocate 1.5x estimation
-            estDistVals = Math.max(MIN_DISTINCT_VALUES, 
+            estDistVals = Math.max(MIN_DISTINCT_VALUES,
                             (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
         }
 
         final boolean spillableEnabled =
                 conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
 
-        GroupByCache groupByCache = 
+        GroupByCache groupByCache =
                 GroupByCacheFactory.INSTANCE.newCache(
                         env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan),
                         aggregators, estDistVals);
@@ -453,7 +460,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
      * Used for an aggregate query in which the key order match the group by key order. In this
      * case, we can do the aggregation as we scan, by detecting when the group by key changes.
      * @param limit TODO
-     * @throws IOException 
+     * @throws IOException
      */
     private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c,
             final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
@@ -559,11 +566,15 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 currentKey = null;
                 return false;
             }
-            
+
             @Override
             public long getMaxResultSize() {
                 return scanner.getMaxResultSize();
             }
+            @Override
+            public int getBatch() {
+                return scanner.getBatch();
+            }
         };
     }
 

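GroupByCacheFactory above chooses between the spillable and the purely in-memory cache off a single boolean read from the server Configuration. A minimal sketch of that selection pattern; the property key, default, and CacheFactory interface here are illustrative placeholders, not the actual Phoenix constants:

    import java.io.Closeable;

    import org.apache.hadoop.conf.Configuration;

    final class CacheChooser {
        // Placeholder key/default; Phoenix reads GROUPBY_SPILLABLE_ATTRIB
        // with DEFAULT_GROUPBY_SPILLABLE.
        static final String SPILLABLE_KEY = "example.groupby.spillable";
        static final boolean SPILLABLE_DEFAULT = true;

        interface Cache extends Closeable { long size(); }
        interface CacheFactory { Cache create(); }

        static Cache newCache(Configuration conf, CacheFactory spillable, CacheFactory inMemory) {
            // One flag decides whether aggregation state may spill to disk
            // or must stay bounded in memory (and abort when it is not).
            boolean spillableEnabled = conf.getBoolean(SPILLABLE_KEY, SPILLABLE_DEFAULT);
            return spillableEnabled ? spillable.create() : inMemory.create();
        }
    }
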
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index cdfc771..1e34d96 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.cache.GlobalCache;
 import org.apache.phoenix.cache.HashCache;
@@ -48,7 +49,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.TupleUtil;
 
 public class HashJoinRegionScanner implements RegionScanner {
-    
+
     private final RegionScanner scanner;
     private final TupleProjector projector;
     private final HashJoinInfo joinInfo;
@@ -60,7 +61,7 @@ public class HashJoinRegionScanner implements RegionScanner {
     private List<Tuple>[] tempTuples;
     private ValueBitSet tempDestBitSet;
     private ValueBitSet[] tempSrcBitSet;
-    
+
     @SuppressWarnings("unchecked")
     public HashJoinRegionScanner(RegionScanner scanner, TupleProjector projector, HashJoinInfo joinInfo, ImmutableBytesWritable tenantId, RegionCoprocessorEnvironment env) throws IOException {
         this.scanner = scanner;
@@ -92,8 +93,8 @@ public class HashJoinRegionScanner implements RegionScanner {
             }
             HashCache hashCache = (HashCache)cache.getServerCache(joinId);
             if (hashCache == null)
-                throw new DoNotRetryIOException("Could not find hash cache for joinId: " 
-                        + Bytes.toString(joinId.get(), joinId.getOffset(), joinId.getLength()) 
+                throw new DoNotRetryIOException("Could not find hash cache for joinId: "
+                        + Bytes.toString(joinId.get(), joinId.getOffset(), joinId.getLength())
                         + ". The cache might have expired and have been removed.");
             hashCaches[i] = hashCache;
             tempSrcBitSet[i] = ValueBitSet.newInstance(joinInfo.getSchemas()[i]);
@@ -103,18 +104,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             this.projector.setValueBitSet(tempDestBitSet);
         }
     }
-    
+
     private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
         if (result.isEmpty())
             return;
-        
+
         Tuple tuple = new ResultTuple(Result.create(result));
         // For backward compatibility. In new versions, HashJoinInfo.forceProjection()
         // always returns true.
         if (joinInfo.forceProjection()) {
             tuple = projector.projectResults(tuple);
         }
-        
+
+        // TODO: fix the Scanner.next() and Scanner.nextRaw() methods below as well.
         if (hasBatchLimit)
             throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
 
@@ -157,7 +159,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                         Tuple lhs = resultQueue.poll();
                         if (!earlyEvaluation) {
                             ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
-                            tempTuples[i] = hashCaches[i].get(key);                        	
+                            tempTuples[i] = hashCaches[i].get(key);
                             if (tempTuples[i] == null) {
                                 if (type == JoinType.Inner || type == JoinType.Semi) {
                                     continue;
@@ -171,7 +173,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                             Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ?
                                     lhs : TupleProjector.mergeProjectedValue(
                                             (ProjectedValueTuple) lhs, schema, tempDestBitSet,
-                                            null, joinInfo.getSchemas()[i], tempSrcBitSet[i], 
+                                            null, joinInfo.getSchemas()[i], tempSrcBitSet[i],
                                             joinInfo.getFieldPositions()[i]);
                             resultQueue.offer(joined);
                             continue;
@@ -180,7 +182,7 @@ public class HashJoinRegionScanner implements RegionScanner {
                             Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ?
                                     lhs : TupleProjector.mergeProjectedValue(
                                             (ProjectedValueTuple) lhs, schema, tempDestBitSet,
-                                            t, joinInfo.getSchemas()[i], tempSrcBitSet[i], 
+                                            t, joinInfo.getSchemas()[i], tempSrcBitSet[i],
                                             joinInfo.getFieldPositions()[i]);
                             resultQueue.offer(joined);
                         }
@@ -211,18 +213,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             }
         }
     }
-    
+
     private boolean shouldAdvance() {
         if (!resultQueue.isEmpty())
             return false;
-        
+
         return hasMore;
     }
-    
+
     private boolean nextInQueue(List<Cell> results) {
-        if (resultQueue.isEmpty())
+        if (resultQueue.isEmpty()) {
             return false;
-        
+        }
+
         Tuple tuple = resultQueue.poll();
         for (int i = 0; i < tuple.size(); i++) {
             results.add(tuple.getValue(i));
@@ -252,19 +255,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             processResults(result, false);
             result.clear();
         }
-        
+
         return nextInQueue(result);
     }
 
     @Override
-    public boolean nextRaw(List<Cell> result, int limit)
+    public boolean nextRaw(List<Cell> result, ScannerContext scannerContext)
             throws IOException {
         while (shouldAdvance()) {
-            hasMore = scanner.nextRaw(result, limit);
-            processResults(result, true);
+            hasMore = scanner.nextRaw(result, scannerContext);
+            processResults(result, false); // TODO: fix honoring the limit
             result.clear();
         }
-        
+
         return nextInQueue(result);
     }
 
@@ -285,19 +288,19 @@ public class HashJoinRegionScanner implements RegionScanner {
             processResults(result, false);
             result.clear();
         }
-        
+
         return nextInQueue(result);
     }
 
     @Override
-    public boolean next(List<Cell> result, int limit) throws IOException {
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
         while (shouldAdvance()) {
-            hasMore = scanner.next(result, limit);
-            processResults(result, true);
+            hasMore = scanner.next(result, scannerContext);
+            processResults(result, false); // TODO: fix honoring the limit
             result.clear();
         }
-        
-        return nextInQueue(result);
+
+        return nextInQueue(result);
     }
 
     @Override
@@ -305,5 +308,10 @@ public class HashJoinRegionScanner implements RegionScanner {
         return this.scanner.getMaxResultSize();
     }
 
+    @Override
+    public int getBatch() {
+        return this.scanner.getBatch();
+    }
+
 }
 

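HashJoinRegionScanner above follows a drain-and-refill shape: pull rows from the wrapped scanner, expand each against the cached hash-join side into a queue, then serve one queued tuple per next() call. A minimal sketch of that control flow with illustrative generic types (the batch-limit handling the TODOs defer is omitted here as well):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    abstract class QueueBackedScanner<IN, OUT> {
        private final Deque<OUT> resultQueue = new ArrayDeque<OUT>();
        private boolean hasMore = true;

        // Mirrors scanner.nextRaw(...): fill one batch, report if more follow.
        protected abstract boolean fetch(List<IN> batch) throws Exception;

        // Mirrors processResults(...): one input batch may yield 0..n outputs.
        protected abstract void expand(List<IN> batch, Deque<OUT> out) throws Exception;

        final boolean next(List<OUT> results) throws Exception {
            // shouldAdvance(): refill while the queue is empty and the
            // underlying scanner still has rows.
            List<IN> batch = new ArrayList<IN>();
            while (resultQueue.isEmpty() && hasMore) {
                batch.clear();
                hasMore = fetch(batch);
                expand(batch, resultQueue);
            }
            // nextInQueue(): emit one queued result per call.
            if (resultQueue.isEmpty()) {
                return false;
            }
            results.add(resultQueue.poll());
            return !resultQueue.isEmpty() || hasMore;
        }
    }
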
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 6f1d5ac..c40e3cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -69,20 +69,20 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
     protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
     private boolean enableRebuildIndex = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD;
     private long rebuildIndexTimeInterval = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL;
-  
+
     @Override
     public void preClose(final ObserverContext<RegionCoprocessorEnvironment> c,
             boolean abortRequested) {
         executor.shutdownNow();
         GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll();
     }
-    
+
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
-        // sleep a little bit to compensate time clock skew when SYSTEM.CATALOG moves 
+        // sleep a little bit to compensate for clock skew when SYSTEM.CATALOG moves
        // among region servers, because we rely on the server time of the RS that is hosting
         // SYSTEM.CATALOG
-        long sleepTime = env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB, 
+        long sleepTime = env.getConfiguration().getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB,
             QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
         try {
             if(sleepTime > 0) {
@@ -91,12 +91,12 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         } catch (InterruptedException ie) {
             Thread.currentThread().interrupt();
         }
-        enableRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, 
+        enableRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
             QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
-        rebuildIndexTimeInterval = env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, 
+        rebuildIndexTimeInterval = env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
             QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
     }
-    
+
 
     @Override
     public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
@@ -119,7 +119,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
             LOG.error("BuildIndexScheduleTask cannot start!", ex);
         }
     }
-    
+
     /**
      * Task runs periodically to build indexes whose INDEX_NEED_PARTIALLY_REBUILD is set true
      *
@@ -133,7 +133,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
         public BuildIndexScheduleTask(RegionCoprocessorEnvironment env) {
             this.env = env;
         }
-      
+
         private String getJdbcUrl() {
             String zkQuorum = this.env.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM);
             String zkClientPort = this.env.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT,
@@ -144,7 +144,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkClientPort
                 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkParentNode;
         }
-      
+
+        @Override
         public void run() {
             RegionScanner scanner = null;
             PhoenixConnection conn = null;
@@ -199,7 +200,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         PhoenixDatabaseMetaData.INDEX_STATE_BYTES);
                     if ((dataTable == null || dataTable.length == 0)
                             || (indexStat == null || indexStat.length == 0)
-                            || ((Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexStat) != 0) 
+                            || ((Bytes.compareTo(PIndexState.DISABLE.getSerializedBytes(), indexStat) != 0)
                                     && (Bytes.compareTo(PIndexState.INACTIVE.getSerializedBytes(), indexStat) != 0))) {
                         // index has to be either in disable or inactive state
                         // data table name can't be empty

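The start() hook above deliberately delays coprocessor startup by a configurable interval to ride out clock skew while SYSTEM.CATALOG moves between region servers. A minimal sketch of that config-driven sleep, including the interrupt-flag restoration shown in the diff (the property key and default are placeholders, not Phoenix's CLOCK_SKEW_INTERVAL_ATTRIB values):

    import org.apache.hadoop.conf.Configuration;

    final class StartupDelay {
        // Placeholder key/default; Phoenix uses QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB.
        static final String SKEW_KEY = "example.clock.skew.interval.ms";
        static final long SKEW_DEFAULT_MS = 2000L;

        static void sleepForClockSkew(Configuration conf) {
            long sleepTime = conf.getLong(SKEW_KEY, SKEW_DEFAULT_MS);
            try {
                if (sleepTime > 0) {
                    Thread.sleep(sleepTime);
                }
            } catch (InterruptedException ie) {
                // Restore the interrupt flag rather than swallowing it.
                Thread.currentThread().interrupt();
            }
        }
    }
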
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index ddde407..77e124d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -199,7 +199,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             indexMaintainer = indexMaintainers.get(0);
             viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
         }
-        
+
         final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
         final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
         innerScanner =
@@ -285,12 +285,12 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                 } finally {
                     try {
                         if(iterator != null) {
-                            iterator.close();    
+                            iterator.close();
                         }
                     } catch (SQLException e) {
                         ServerUtil.throwIOException(region.getRegionNameAsString(), e);
                     } finally {
-                        chunk.close();                
+                        chunk.close();
                     }
                 }
             }
@@ -299,6 +299,11 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             public long getMaxResultSize() {
                 return s.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+              return s.getBatch();
+            }
         };
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e43e5e5..2d6d98a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -101,8 +101,8 @@ import com.google.common.collect.Sets;
 
 /**
  * Region observer that aggregates ungrouped rows (i.e. a SQL query with an aggregation function and no GROUP BY).
- * 
- * 
+ *
+ *
  * @since 0.1
  */
 public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
@@ -116,7 +116,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     public static final String EMPTY_CF = "EmptyCF";
     private static final Logger logger = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
     private KeyValueBuilder kvBuilder;
-    
+
     @Override
     public void start(CoprocessorEnvironment e) throws IOException {
         super.start(e);
@@ -139,14 +139,14 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     public static void serializeIntoScan(Scan scan) {
         scan.setAttribute(BaseScannerRegionObserver.UNGROUPED_AGG, QueryConstants.TRUE);
     }
-    
+
     @Override
     public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s)
             throws IOException {
         s = super.preScannerOpen(e, scan, s);
         if (ScanUtil.isAnalyzeTable(scan)) {
             // We are setting the start row and stop row such that it covers the entire region. As part
-            // of Phonenix-1263 we are storing the guideposts against the physical table rather than 
+            // of PHOENIX-1263 we are storing the guideposts against the physical table rather than
             // individual tenant specific tables.
             scan.setStartRow(HConstants.EMPTY_START_ROW);
             scan.setStopRow(HConstants.EMPTY_END_ROW);
@@ -154,7 +154,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
         return s;
     }
-    
+
     @Override
     protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
         int offset = 0;
@@ -179,9 +179,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
         List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
         List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.<Mutation>newArrayListWithExpectedSize(1024);
-        
+
         RegionScanner theScanner = s;
-        
+
         byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
         PTable projectedTable = null;
         List<Expression> selectExpressions = null;
@@ -226,14 +226,14 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             }
             ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
             theScanner =
-                    getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, 
+                    getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
                             dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
-        } 
-        
+        }
+
         if (j != null)  {
             theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
         }
-        
+
         int batchSize = 0;
         List<Mutation> mutations = Collections.emptyList();
         boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
@@ -330,7 +330,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                                         }
                                         column.getDataType().coerceBytes(ptr, value,
                                             expression.getDataType(), expression.getMaxLength(),
-                                            expression.getScale(), expression.getSortOrder(), 
+                                            expression.getScale(), expression.getSortOrder(),
                                             column.getMaxLength(), column.getScale(),
                                             column.getSortOrder());
                                         byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
@@ -418,7 +418,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                 }
             }
         }
-        
+
         if (logger.isDebugEnabled()) {
         	logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
         }
@@ -438,7 +438,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
         }
         final KeyValue aggKeyValue = keyValue;
-        
+
         RegionScanner scanner = new BaseRegionScanner() {
             private boolean done = !hadAny;
 
@@ -464,11 +464,16 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                 results.add(aggKeyValue);
                 return false;
             }
-            
+
             @Override
             public long getMaxResultSize() {
             	return scan.getMaxResultSize();
             }
+
+            @Override
+            public int getBatch() {
+                return innerScanner.getBatch();
+            }
         };
         return scanner;
     }
@@ -496,7 +501,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
         indexMutations.clear();
     }
-    
+
     @Override
     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
         final Store store, InternalScanner scanner, final ScanType scanType)
@@ -505,8 +510,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         InternalScanner internalScanner = scanner;
         if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
             try {
-                boolean useCurrentTime = 
-                        c.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+                boolean useCurrentTime =
+                        c.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
                                 QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
                 // Provides a means of clients controlling their timestamps to not use current time
                 // when background tasks are updating stats. Instead we track the max timestamp of
@@ -526,8 +531,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
         return internalScanner;
     }
-    
-    
+
+
     @Override
     public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion l, HRegion r)
             throws IOException {
@@ -535,8 +540,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         TableName table = region.getRegionInfo().getTable();
         StatisticsCollector stats = null;
         try {
-            boolean useCurrentTime = 
-                    e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+            boolean useCurrentTime =
+                    e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
                             QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
             // Provides a means of clients controlling their timestamps to not use current time
             // when background tasks are updating stats. Instead we track the max timestamp of
@@ -544,7 +549,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
             stats = new StatisticsCollector(e.getEnvironment(), table.getNameAsString(), clientTimeStamp);
             stats.splitStats(region, l, r);
-        } catch (IOException ioe) { 
+        } catch (IOException ioe) {
             if(logger.isWarnEnabled()) {
                 logger.warn("Error while collecting stats during split for " + table,ioe);
             }
@@ -559,7 +564,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             return PTableImpl.createFromProto(ptableProto);
         } catch (IOException e) {
             throw new RuntimeException(e);
-        } 
+        }
     }
 
     private static List<Expression> deserializeExpressions(byte[] b) {

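The ungrouped aggregation path above surfaces its final result as a single synthetic KeyValue served by a small anonymous scanner that emits exactly one row and then reports exhaustion. A compact sketch of that one-shot pattern (SingleCellScanner is an illustrative name; the real code is an anonymous subclass of Phoenix's BaseRegionScanner):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;

    final class SingleCellScanner {
        private final Cell aggCell;
        private boolean done;

        SingleCellScanner(Cell aggCell, boolean hadAnyRows) {
            this.aggCell = aggCell;
            this.done = !hadAnyRows; // no input rows: nothing to emit at all
        }

        // Same shape as RegionScanner.next(List<Cell>): fill results and
        // return whether more rows follow. At most one row ever exists here.
        public boolean next(List<Cell> results) throws IOException {
            if (done) {
                return false;
            }
            done = true;
            results.add(aggCell);
            return false; // the single aggregate row is also the last row
        }
    }
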
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 3469042..71cc1d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -70,4 +70,4 @@ public class LocalTable implements LocalHBaseState {
     scanner.close();
     return r;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
index e225696..435a1c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java
@@ -57,7 +57,7 @@ public class FilteredKeyValueScanner implements KeyValueScanner {
     /**
     * Same as {@link KeyValueScanner#next()} except that we filter out the next {@link KeyValue} until we find one that
      * passes the filter.
-     * 
+     *
      * @return the next {@link KeyValue} or <tt>null</tt> if no next {@link KeyValue} is present and passes all the
      *         filters.
      */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index b89c807..b5e6a63 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -59,14 +59,14 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
             Mutation m = miniBatchOp.getOperation(i);
             keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
             List<IndexMaintainer> indexMaintainers = getCodec().getIndexMaintainers(m.getAttributesMap());
-            
+
             for(IndexMaintainer indexMaintainer: indexMaintainers) {
                 if (indexMaintainer.isImmutableRows() && indexMaintainer.isLocalIndex()) continue;
                 indexTableName.set(indexMaintainer.getIndexTableName());
                 if (maintainers.get(indexTableName) != null) continue;
                 maintainers.put(indexTableName, indexMaintainer);
             }
-            
+
         }
         if (maintainers.isEmpty()) return;
         Scan scan = IndexManagementUtil.newLocalStateScan(new ArrayList<IndexMaintainer>(maintainers.values()));
@@ -100,7 +100,7 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
     private PhoenixIndexCodec getCodec() {
         return (PhoenixIndexCodec)this.codec;
     }
-    
+
     @Override
     public byte[] getBatchId(Mutation m){
         return this.codec.getBatchId(m);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
index 88e141a..52fbe9c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
@@ -31,15 +31,15 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class RegionScannerResultIterator extends BaseResultIterator {
     private final RegionScanner scanner;
-    
+
     public RegionScannerResultIterator(RegionScanner scanner) {
         this.scanner = scanner;
     }
-    
+
     @Override
     public Tuple next() throws SQLException {
-        // XXX: No access here to the region instance to enclose this with startRegionOperation / 
-        // stopRegionOperation 
+        // XXX: No access here to the region instance to enclose this with startRegionOperation /
+        // stopRegionOperation
         synchronized (scanner) {
             try {
                 // TODO: size
@@ -48,6 +48,7 @@ public class RegionScannerResultIterator extends BaseResultIterator {
                 // since this is an indication of whether or not there are more values after the
                 // ones returned
                 boolean hasMore = scanner.nextRaw(results);
+
                 if (!hasMore && results.isEmpty()) {
                     return null;
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index de59304..0e50923 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 /**
@@ -58,15 +59,15 @@ public class StatisticsScanner implements InternalScanner {
     }
 
     @Override
-    public boolean next(List<Cell> result, int limit) throws IOException {
-        boolean ret = delegate.next(result, limit);
+    public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        boolean ret = delegate.next(result, scannerContext);
         updateStat(result);
         return ret;
     }
 
     /**
     * Update the current statistics based on the latest batch of key-values from the underlying scanner
-     * 
+     *
      * @param results
      *            next batch of {@link KeyValue}s
      */
@@ -122,4 +123,5 @@ public class StatisticsScanner implements InternalScanner {
             }
         }
     }
-}
\ No newline at end of file
+
+}

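StatisticsScanner above is a pass-through InternalScanner that feeds every batch it sees to the statistics collector before returning it to the caller. A minimal sketch of that observer pattern against the HBase 1.1 InternalScanner interface (the Stats callback is an illustrative stand-in for Phoenix's StatisticsCollector):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.regionserver.ScannerContext;

    final class ObservingScanner implements InternalScanner {
        interface Stats { void observe(List<Cell> batch); }

        private final InternalScanner delegate;
        private final Stats stats;

        ObservingScanner(InternalScanner delegate, Stats stats) {
            this.delegate = delegate;
            this.stats = stats;
        }

        @Override
        public boolean next(List<Cell> result) throws IOException {
            boolean hasMore = delegate.next(result);
            stats.observe(result); // every batch flows past the collector
            return hasMore;
        }

        @Override
        public boolean next(List<Cell> result, ScannerContext ctx) throws IOException {
            boolean hasMore = delegate.next(result, ctx);
            stats.observe(result);
            return hasMore;
        }

        @Override
        public void close() throws IOException {
            delegate.close(); // the real class also flushes stats on close
        }
    }
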
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
index 12f1863..030b114 100644
--- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixIndexRpcSchedulerTest.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.ipc.RpcScheduler.Context;
+import org.apache.hadoop.hbase.ipc.RpcServer.Connection;
 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -86,11 +87,12 @@ public class PhoenixIndexRpcSchedulerTest {
     }
 
     private void dispatchCallWithPriority(RpcScheduler scheduler, int priority) throws Exception {
+        Connection connection = Mockito.mock(Connection.class);
         CallRunner task = Mockito.mock(CallRunner.class);
         RequestHeader header = RequestHeader.newBuilder().setPriority(priority).build();
         RpcServer server = new RpcServer(null, "test-rpcserver", null, isa, conf, scheduler);
         RpcServer.Call call =
-                server.new Call(0, null, null, header, null, null, null, null, 10, null);
+                server.new Call(0, null, null, header, null, null, connection, null, 10, null, null);
         Mockito.when(task.getCall()).thenReturn(call);
 
         scheduler.dispatch(task);
@@ -98,4 +100,4 @@ public class PhoenixIndexRpcSchedulerTest {
         Mockito.verify(task).getCall();
         Mockito.verifyNoMoreInteractions(task);
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
index 54db5d8..e996b23 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
@@ -37,7 +37,6 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-
 import org.apache.phoenix.hbase.index.covered.IndexUpdate;
 import org.apache.phoenix.hbase.index.covered.LocalTableState;
 import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 60c11d7..ae577bd 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -317,9 +317,9 @@ public class TestWALRecoveryCaching {
         }
 
         LOG.info("Starting region server:" + server.getHostname());
-        cluster.startRegionServer(server.getHostname());
+        cluster.startRegionServer(server.getHostname(), server.getPort());
 
-        cluster.waitForRegionServerToStart(server.getHostname(), TIMEOUT);
+        cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), TIMEOUT);
 
         // start a server to get back to the base number of servers
         LOG.info("STarting server to replace " + server);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index a35e309..8b4ee84 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -89,7 +89,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-testing-util</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
       <optional>true</optional>
       <exclusions>
@@ -102,7 +101,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
@@ -115,41 +113,34 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 5005f7c..4ad06d1 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -58,7 +58,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-testing-util</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
       <optional>true</optional>
       <exclusions>
@@ -71,7 +70,6 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
@@ -84,41 +82,56 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
-      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-protocol</artifactId>
-      <version>${hbase.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
-      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop2-compat</artifactId>
-      <version>${hbase.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index d267d84..7086bb6 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -461,6 +461,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-it</artifactId>
       <version>${hbase.version}</version>
       <type>test-jar</type>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7bc9cce1/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index eec1f2a..707ea66 100644
--- a/pom.xml
+++ b/pom.xml
@@ -79,7 +79,7 @@
     <top.dir>${project.basedir}</top.dir>
 
     <!-- Hadoop Versions -->
-    <hbase.version>1.0.1</hbase.version>
+    <hbase.version>1.1.0</hbase.version>
     <hadoop-two.version>2.5.1</hadoop-two.version>
 
     <!-- Dependency versions -->
@@ -475,6 +475,11 @@
       <!-- HBase dependencies -->
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-annotations</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-testing-util</artifactId>
         <version>${hbase.version}</version>
         <scope>test</scope>
@@ -511,13 +516,34 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-common</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-client</artifactId>
         <version>${hbase.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-client</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-server</artifactId>
+        <version>${hbase.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
         <artifactId>hbase-server</artifactId>
         <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
       </dependency>
       <dependency>
         <groupId>org.apache.hbase</groupId>
@@ -531,6 +557,19 @@
         <type>test-jar</type>
         <scope>test</scope>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-hadoop2-compat</artifactId>
+        <version>${hbase.version}</version>
+        <scope>test</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-hadoop2-compat</artifactId>
+        <version>${hbase.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
 
       <!-- Hadoop Dependencies -->
       <dependency>


[43/50] [abbrv] phoenix git commit: PHOENIX-2018 Implement math built-in function SQRT (Shuxiong Ye)

Posted by ma...@apache.org.
PHOENIX-2018 Implement math built-in function SQRT (Shuxiong Ye)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e54c99d8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e54c99d8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e54c99d8

Branch: refs/heads/calcite
Commit: e54c99d8b1ce7bd6118df46209e102e9a86c3782
Parents: 47466e3
Author: James Taylor <ja...@apache.org>
Authored: Thu Jun 4 14:26:27 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Thu Jun 4 14:26:27 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/SqrtFunctionEnd2EndIT.java  | 143 ++++++++++++++++++
 .../phoenix/expression/ExpressionType.java      |   4 +-
 .../function/JavaMathOneArgumentFunction.java   |  77 ++++++++++
 .../expression/function/SqrtFunction.java       |  49 ++++++
 .../phoenix/expression/SqrtFunctionTest.java    | 150 +++++++++++++++++++
 5 files changed, 422 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e54c99d8/phoenix-core/src/it/java/org/apache/phoenix/end2end/SqrtFunctionEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SqrtFunctionEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SqrtFunctionEnd2EndIT.java
new file mode 100644
index 0000000..50fdd4f
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SqrtFunctionEnd2EndIT.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.SqrtFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link SqrtFunction}
+ */
+public class SqrtFunctionEnd2EndIT extends BaseHBaseManagedTimeIT {
+
+    private static final String KEY = "key";
+    private static final double ZERO = 1e-8;
+
+    @Before
+    public void initTable() throws Exception {
+        Connection conn = null;
+        PreparedStatement stmt = null;
+        try {
+            conn = DriverManager.getConnection(getUrl());
+            String ddl;
+            ddl = "CREATE TABLE testSigned (k VARCHAR NOT NULL PRIMARY KEY, doub DOUBLE, fl FLOAT, inte INTEGER, lon BIGINT, smalli SMALLINT, tinyi TINYINT)";
+            conn.createStatement().execute(ddl);
+            ddl = "CREATE TABLE testUnsigned (k VARCHAR NOT NULL PRIMARY KEY, doub UNSIGNED_DOUBLE, fl UNSIGNED_FLOAT, inte UNSIGNED_INT, lon UNSIGNED_LONG, smalli UNSIGNED_SMALLINT, tinyi UNSIGNED_TINYINT)";
+            conn.createStatement().execute(ddl);
+            conn.commit();
+        } finally {
+            closeStmtAndConn(stmt, conn);
+        }
+    }
+
+    private void updateSignedTable(Connection conn, double data) throws Exception {
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testSigned VALUES (?, ?, ?, ?, ?, ?, ?)");
+        stmt.setString(1, KEY);
+        Double d = Double.valueOf(data);
+        stmt.setDouble(2, d.doubleValue());
+        stmt.setFloat(3, d.floatValue());
+        stmt.setInt(4, d.intValue());
+        stmt.setLong(5, d.longValue());
+        stmt.setShort(6, d.shortValue());
+        stmt.setByte(7, d.byteValue());
+        stmt.executeUpdate();
+        conn.commit();
+    }
+
+    private void updateUnsignedTable(Connection conn, double data) throws Exception {
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testUnsigned VALUES (?, ?, ?, ?, ?, ?, ?)");
+        stmt.setString(1, KEY);
+        Double d = Double.valueOf(data);
+        stmt.setDouble(2, d.doubleValue());
+        stmt.setFloat(3, d.floatValue());
+        stmt.setInt(4, d.intValue());
+        stmt.setLong(5, d.longValue());
+        stmt.setShort(6, d.shortValue());
+        stmt.setByte(7, d.byteValue());
+        stmt.executeUpdate();
+        conn.commit();
+    }
+
+    private void testSignedNumberSpec(Connection conn, double data) throws Exception {
+        updateSignedTable(conn, data);
+        ResultSet rs = conn.createStatement().executeQuery("SELECT SQRT(doub),SQRT(fl),SQRT(inte),SQRT(lon),SQRT(smalli),SQRT(tinyi) FROM testSigned");
+        assertTrue(rs.next());
+        Double d = Double.valueOf(data);
+        assertTrue(Math.abs(rs.getDouble(1) - Math.sqrt(d.doubleValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(2) - Math.sqrt(d.floatValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(3) - Math.sqrt(d.intValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(4) - Math.sqrt(d.longValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(5) - Math.sqrt(d.shortValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(6) - Math.sqrt(d.byteValue())) < ZERO);
+        assertTrue(!rs.next());
+        PreparedStatement stmt = conn.prepareStatement("SELECT k FROM testSigned WHERE SQRT(doub)>0 AND SQRT(fl)>0 AND SQRT(inte)>0 AND SQRT(lon)>0 AND SQRT(smalli)>0 AND SQRT(tinyi)>0");
+        rs = stmt.executeQuery();
+        if (data > 0) {
+            assertTrue(rs.next());
+            assertEquals(KEY, rs.getString(1));
+        }
+        assertTrue(!rs.next());
+    }
+
+    private void testUnsignedNumberSpec(Connection conn, double data) throws Exception {
+        updateUnsignedTable(conn, data);
+        ResultSet rs = conn.createStatement().executeQuery("SELECT SQRT(doub),SQRT(fl),SQRT(inte),SQRT(lon),SQRT(smalli),SQRT(tinyi) FROM testUnsigned");
+        assertTrue(rs.next());
+        Double d = Double.valueOf(data);
+        assertTrue(Math.abs(rs.getDouble(1) - Math.sqrt(d.doubleValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(2) - Math.sqrt(d.floatValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(3) - Math.sqrt(d.intValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(4) - Math.sqrt(d.longValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(5) - Math.sqrt(d.shortValue())) < ZERO);
+        assertTrue(Math.abs(rs.getDouble(6) - Math.sqrt(d.byteValue())) < ZERO);
+        assertTrue(!rs.next());
+        PreparedStatement stmt = conn.prepareStatement("SELECT k FROM testUnsigned WHERE SQRT(doub)>0 AND SQRT(fl)>0 AND SQRT(inte)>0 AND SQRT(lon)>0 AND SQRT(smalli)>0 AND SQRT(tinyi)>0");
+        rs = stmt.executeQuery();
+        if (data > 0) {
+            assertTrue(rs.next());
+            assertEquals(KEY, rs.getString(1));
+        }
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testSignedNumber() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        for (double d : new double[] { 0.0, 1.0, 123.1234}) {
+            testSignedNumberSpec(conn, d);
+        }
+    }
+
+    @Test
+    public void testUnsignedNumber() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        for (double d : new double[] { 0.0, 1.0, 123.1234 }) {
+            testUnsignedNumberSpec(conn, d);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e54c99d8/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index d7142e7..684e620 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -81,6 +81,7 @@ import org.apache.phoenix.expression.function.SQLViewTypeFunction;
 import org.apache.phoenix.expression.function.SecondFunction;
 import org.apache.phoenix.expression.function.SignFunction;
 import org.apache.phoenix.expression.function.SqlTypeNameFunction;
+import org.apache.phoenix.expression.function.SqrtFunction;
 import org.apache.phoenix.expression.function.StddevPopFunction;
 import org.apache.phoenix.expression.function.StddevSampFunction;
 import org.apache.phoenix.expression.function.StringBasedRegexpReplaceFunction;
@@ -231,7 +232,8 @@ public enum ExpressionType {
     DayOfMonthFunction(DayOfMonthFunction.class),
     ArrayAppendFunction(ArrayAppendFunction.class),
     UDFExpression(UDFExpression.class),
-    ArrayPrependFunction(ArrayPrependFunction.class)
+    ArrayPrependFunction(ArrayPrependFunction.class),
+    SqrtFunction(SqrtFunction.class)
     ;
 
     ExpressionType(Class<? extends Expression> clazz) {
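
Note that SqrtFunction is appended as the last entry of the enum rather than
inserted alphabetically. This enum only ever grows at its tail, which suggests
expressions are serialized by enum ordinal; if so, reordering existing entries
would desynchronize the wire format between differently versioned clients and
servers. A self-contained sketch of the ordinal round-trip such a scheme
depends on (illustration only, not Phoenix code):

    // Illustration only: an enum serialized by its ordinal stays
    // wire-compatible only if existing entries never move or disappear.
    enum OpType { ADD, SUBTRACT, SQRT } // new entries are appended last

    public class OrdinalRoundTrip {
        public static void main(String[] args) {
            int wire = OpType.SQRT.ordinal();       // written by the sender
            OpType decoded = OpType.values()[wire]; // read by the receiver
            System.out.println(decoded);            // prints SQRT
        }
    }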

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e54c99d8/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
new file mode 100644
index 0000000..4ea5367
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+
+public abstract class JavaMathOneArgumentFunction extends ScalarFunction {
+
+    public JavaMathOneArgumentFunction() {
+    }
+
+    public JavaMathOneArgumentFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    protected abstract double compute(double firstArg);
+
+    @Override
+    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+        Expression childExpr = children.get(0);
+        PDataType returnType = getDataType();
+        if (childExpr.evaluate(tuple, ptr)) {
+            if (ptr.getLength() == 0) {
+                return true;
+            }
+            double result;
+            if (childExpr.getDataType() == PDecimal.INSTANCE) {
+                result =
+                        ((BigDecimal) childExpr.getDataType().toObject(ptr,
+                            childExpr.getSortOrder())).doubleValue();
+            } else {
+                result =
+                        childExpr.getDataType().getCodec()
+                                .decodeDouble(ptr, childExpr.getSortOrder());
+            }
+            ptr.set(new byte[returnType.getByteSize()]);
+            returnType.getCodec().encodeDouble(compute(result), ptr);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public PDataType getDataType() {
+        return PDouble.INSTANCE;
+    }
+
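+    // Note: YES assumes compute() is monotonically non-decreasing over its
+    // input, as SQRT is for non-negative inputs; a subclass wrapping a
+    // non-monotonic function would need to override preservesOrder().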
+    @Override
+    public OrderPreserving preservesOrder() {
+        return OrderPreserving.YES;
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e54c99d8/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
new file mode 100644
index 0000000..bb5376e
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.types.PDecimal;
+
+@BuiltInFunction(name = SqrtFunction.NAME, args = { @Argument(allowedTypes = { PDecimal.class }) })
+public class SqrtFunction extends JavaMathOneArgumentFunction {
+
+    public static final String NAME = "SQRT";
+
+    public SqrtFunction() {
+    }
+
+    public SqrtFunction(List<Expression> children) throws SQLException {
+        super(children);
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    @Override
+    protected double compute(double firstArg) {
+        return Math.sqrt(firstArg);
+    }
+}
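
The subclass is deliberately thin: everything except the SQL name and the math
itself lives in JavaMathOneArgumentFunction. As a purely hypothetical sketch
(not part of this commit), a cube-root built-in could be added the same way,
and would additionally need an entry appended to ExpressionType:

    package org.apache.phoenix.expression.function;

    import java.sql.SQLException;
    import java.util.List;

    import org.apache.phoenix.expression.Expression;
    import org.apache.phoenix.parse.FunctionParseNode.Argument;
    import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
    import org.apache.phoenix.schema.types.PDecimal;

    // Hypothetical: mirrors SqrtFunction above, swapping in Math.cbrt.
    @BuiltInFunction(name = CbrtFunction.NAME, args = { @Argument(allowedTypes = { PDecimal.class }) })
    public class CbrtFunction extends JavaMathOneArgumentFunction {

        public static final String NAME = "CBRT";

        public CbrtFunction() {
        }

        public CbrtFunction(List<Expression> children) throws SQLException {
            super(children);
        }

        @Override
        public String getName() {
            return NAME;
        }

        @Override
        protected double compute(double firstArg) {
            return Math.cbrt(firstArg);
        }
    }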

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e54c99d8/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java
new file mode 100644
index 0000000..6b19ad8
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.SqrtFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PNumericType;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.apache.phoenix.schema.types.PUnsignedSmallint;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Unit tests for {@link SqrtFunction}
+ */
+public class SqrtFunctionTest {
+    private static final double ZERO = 1e-9;
+
+    private static boolean twoDoubleEquals(double a, double b) {
+        if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
+        if (Double.isNaN(a)) return true;
+        if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
+        if (Double.isInfinite(a)) {
+            return (a > 0) == (b > 0);
+        }
+        return Math.abs(a - b) <= ZERO;
+    }
+
+    private static boolean testExpression(LiteralExpression literal, double expected)
+            throws SQLException {
+        List<Expression> expressions = Lists.newArrayList((Expression) literal);
+        Expression sqrtFunction = new SqrtFunction(expressions);
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        boolean ret = sqrtFunction.evaluate(null, ptr);
+        if (ret) {
+            Double result =
+                    (Double) sqrtFunction.getDataType().toObject(ptr, sqrtFunction.getSortOrder());
+            assertTrue(twoDoubleEquals(result.doubleValue(), expected));
+        }
+        return ret;
+    }
+
+    private static void test(Number value, PNumericType dataType, double expected)
+            throws SQLException {
+        LiteralExpression literal;
+        literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+        boolean ret1 = testExpression(literal, expected);
+        literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+        boolean ret2 = testExpression(literal, expected);
+        assertEquals(ret1, ret2);
+    }
+
+    private static void testBatch(Number[] value, PNumericType dataType) throws SQLException {
+        double[] expected = new double[value.length];
+        for (int i = 0; i < expected.length; ++i) {
+            expected[i] = Math.sqrt(value[i].doubleValue());
+        }
+        assertEquals(value.length, expected.length);
+        for (int i = 0; i < value.length; ++i) {
+            test(value[i], dataType, expected[i]);
+        }
+    }
+
+    @Test
+    public void testSqrtFunction() throws Exception {
+        Random random = new Random();
+
+        testBatch(
+            new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0),
+                    BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234),
+                    BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()),
+                    BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE);
+
+        testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(),
+                random.nextFloat() }, PFloat.INSTANCE);
+
+        testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE);
+
+        testBatch(
+            new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(),
+                    random.nextDouble() }, PDouble.INSTANCE);
+
+        testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE);
+
+        testBatch(
+            new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L,
+                    random.nextLong(), random.nextLong() }, PLong.INSTANCE);
+
+        testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE);
+
+        testBatch(
+            new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123,
+                    random.nextInt(), random.nextInt() }, PInteger.INSTANCE);
+
+        testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE);
+
+        testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE,
+                (short) 123, (short) -123 }, PSmallint.INSTANCE);
+
+        testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 },
+            PUnsignedSmallint.INSTANCE);
+
+        testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE,
+                (byte) 123, (byte) -123 }, PTinyint.INSTANCE);
+
+        testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PUnsignedTinyint.INSTANCE);
+    }
+}


[34/50] [abbrv] phoenix git commit: PHOENIX-1939 Tests are failing with DoNotRetryIOException: ATABLE: null (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-1939 Tests are failing with DoNotRetryIOException: ATABLE: null (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/08fc27d4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/08fc27d4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/08fc27d4

Branch: refs/heads/calcite
Commit: 08fc27d4c352f41f4999c8aa8bce953b3f4092cb
Parents: 160e949
Author: Nick Dimiduk <nd...@apache.org>
Authored: Fri May 29 17:12:25 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Fri May 29 17:12:25 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/schema/PTableImpl.java      | 4 ++--
 .../src/test/java/org/apache/phoenix/query/BaseTest.java         | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/08fc27d4/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 9a2ae7f..b62dbf5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -88,8 +88,8 @@ public class PTableImpl implements PTable {
 
     private PTableKey key;
     private PName name;
-    private PName schemaName;
-    private PName tableName;
+    private PName schemaName = PName.EMPTY_NAME;
+    private PName tableName = PName.EMPTY_NAME;
     private PName tenantId;
     private PTableType type;
     private PIndexState state;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/08fc27d4/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 54ae670..b0574c3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -121,7 +121,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.LocalIndexMerger;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -829,6 +828,7 @@ public abstract class BaseTest {
                     logger.info("Table " + fullTableName + " is already deleted.");
                 }
             }
+            rs.close();
             if (lastTenantId != null) {
                 conn.close();
             }
@@ -860,6 +860,7 @@ public abstract class BaseTest {
             logger.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
             conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
         }
+        rs.close();
     }
     
     protected static void initSumDoubleValues(byte[][] splits, String url) throws Exception {
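
Alongside the explicit rs.close() calls added above, the same leak could be
avoided wholesale with try-with-resources, which closes the ResultSet and
Statement even when an exception unwinds the loop. A minimal sketch, where
metadataQuery stands in for the metadata scan the test performs:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    final class CleanupSketch {
        static void dropAll(Connection conn, String metadataQuery) throws SQLException {
            try (Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(metadataQuery)) {
                while (rs.next()) {
                    // ... issue a DROP statement per row, as the test does ...
                }
            } // rs and stmt are closed here automatically, even on exceptions
        }
    }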


[13/50] [abbrv] phoenix git commit: PHOENIX-1962 Apply check style to the build

Posted by ma...@apache.org.
PHOENIX-1962 Apply check style to the build


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/978b2322
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/978b2322
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/978b2322

Branch: refs/heads/calcite
Commit: 978b2322e3e962550c1cddda9910f4f70346aaee
Parents: 93397af
Author: Nick Dimiduk <nd...@apache.org>
Authored: Sat May 9 11:10:54 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Mon May 11 09:52:00 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                    |   4 +
 phoenix-core/pom.xml                        |   4 +
 phoenix-flume/pom.xml                       |   4 +
 phoenix-pherf/pom.xml                       |   1 +
 phoenix-pig/pom.xml                         |   4 +
 phoenix-server-client/pom.xml               |   4 +
 phoenix-server/pom.xml                      |   4 +
 phoenix-spark/pom.xml                       |   1 +
 pom.xml                                     |  23 ++
 src/main/config/checkstyle/checker.xml      | 281 +++++++++++++++++++++++
 src/main/config/checkstyle/header.txt       |  16 ++
 src/main/config/checkstyle/suppressions.xml |  46 ++++
 12 files changed, 392 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index d743bcf..5a73e7a 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -33,6 +33,10 @@
   <description>Assemble Phoenix artifacts</description>
   <packaging>pom</packaging>
 
+  <properties>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+
   <build>
     <plugins>
       <plugin>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index a4c052c..65e4f8e 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -24,6 +24,10 @@
       <url>http://www.apache.org</url>
   </organization>
 
+  <properties>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+
   <build>
     <resources>
       <resource>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index b8c4b8a..a35e309 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -31,6 +31,10 @@
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>
 
+  <properties>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+
   <dependencies>
    <dependency>
       <groupId>org.apache.phoenix</groupId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 337f69c..1667c66 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -30,6 +30,7 @@
     <name>Phoenix - Pherf</name>
 
     <properties>
+      <top.dir>${project.basedir}/..</top.dir>
     </properties>
 
     <profiles>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index c1b0985..5005f7c 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -31,6 +31,10 @@
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>
 
+  <properties>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+
   <dependencies>
     <dependency>
       <groupId>org.apache.phoenix</groupId>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-server-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server-client/pom.xml b/phoenix-server-client/pom.xml
index 5e2d32e..e23fcba 100644
--- a/phoenix-server-client/pom.xml
+++ b/phoenix-server-client/pom.xml
@@ -24,6 +24,10 @@
     <url>http://www.apache.org</url>
   </organization>
 
+  <properties>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+
   <build>
     <plugins>
       <plugin>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 4737b63..7dd09aa 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -24,6 +24,10 @@
     <url>http://www.apache.org</url>
   </organization>
 
+  <properties>
+    <top.dir>${project.basedir}/..</top.dir>
+  </properties>
+
   <build>
     <plugins>
       <plugin>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index a61ba5b..d267d84 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -37,6 +37,7 @@
     <spark.version>1.3.0</spark.version>
     <scala.version>2.10.4</scala.version>
     <scala.binary.version>2.10</scala.binary.version>
+    <top.dir>${project.basedir}/..</top.dir>
   </properties>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0371191..23ac578 100644
--- a/pom.xml
+++ b/pom.xml
@@ -76,6 +76,7 @@
     <antlr-input.dir>src/main/antlr3</antlr-input.dir>
     <antlr-output.dir>target/generated-sources/antlr3</antlr-output.dir>
     <test.output.tofile>true</test.output.tofile>
+    <top.dir>${project.basedir}</top.dir>
 
     <!-- Hadoop Versions -->
     <hbase.version>1.0.1</hbase.version>
@@ -332,6 +333,28 @@
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <version>2.13</version>
+        <executions>
+          <execution>
+            <id>validate</id>
+            <phase>validate</phase>
+            <configuration>
+              <configLocation>${top.dir}/src/main/config/checkstyle/checker.xml</configLocation>
+              <suppressionsLocation>${top.dir}/src/main/config/checkstyle/suppressions.xml</suppressionsLocation>
+              <consoleOutput>true</consoleOutput>
+              <headerLocation>${top.dir}/src/main/config/checkstyle/header.txt</headerLocation>
+              <failOnViolation><!--true-->false</failOnViolation>
+              <includeTestSourceDirectory><!--true-->false</includeTestSourceDirectory>
+            </configuration>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-source-plugin</artifactId>
         <version>2.2.1</version>
         <executions>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/src/main/config/checkstyle/checker.xml
----------------------------------------------------------------------
diff --git a/src/main/config/checkstyle/checker.xml b/src/main/config/checkstyle/checker.xml
new file mode 100644
index 0000000..ecf3946
--- /dev/null
+++ b/src/main/config/checkstyle/checker.xml
@@ -0,0 +1,281 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!--
+  This version of checkstyle is based on the Apache Calcite checkstyle
+  checkstyle configuration, which in turn is based on Giraph and Hadoop and
+  common-math configurations.
+
+  The documentation for checkstyle is available at
+
+  http://checkstyle.sourceforge.net
+-->
+
+<!DOCTYPE module PUBLIC "-//Puppy Crawl//DTD Check Configuration 1.1//EN" "http://www.puppycrawl.com/dtds/configuration_1_1.dtd">
+
+<!-- Calcite customization of default Checkstyle behavior -->
+<module name="Checker">
+  <property name="localeLanguage" value="en"/>
+
+  <!-- Checks for headers -->
+  <!-- See http://checkstyle.sf.net/config_header.html -->
+    <!-- Verify that EVERY source file has the appropriate license -->
+  <module name="Header">
+    <property name="headerFile" value="${checkstyle.header.file}"/>
+  </module>
+
+  <!-- Checks for Javadoc comments (checker).           -->
+  <!-- See http://checkstyle.sf.net/config_javadoc.html -->
+    <!-- Require package javadoc -->
+  <module name="JavadocPackage"/>
+
+  <!-- Miscellaneous other checks (checker).         -->
+  <!-- See http://checkstyle.sf.net/config_misc.html -->
+    <!-- Require files to end with newline characters -->
+  <module name="NewlineAtEndOfFile">
+    <property name="lineSeparator" value="lf"/>
+  </module>
+
+  <!-- Checks for whitespace (tree walker)                 -->
+  <!-- See http://checkstyle.sf.net/config_whitespace.html -->
+    <!-- No tabs allowed! -->
+  <module name="FileTabCharacter"/>
+
+  <module name="TreeWalker">
+    <property name="cacheFile" value="target/checkstyle-cachefile"/>
+
+    <!-- Checks for blocks. You know, those {}'s         -->
+    <!-- See http://checkstyle.sf.net/config_blocks.html -->
+      <!-- No empty blocks (i.e. catch); must contain at least a comment -->
+    <module name="EmptyBlock">
+      <property name="option" value="text"/>
+    </module>
+    <module name="AvoidNestedBlocks">
+        <property name="allowInSwitchCase" value="true"/>
+    </module>
+    <module name="LeftCurly"/>
+      <!-- No if/else/do/for/while without braces -->
+    <module name="NeedBraces"/>
+    <module name="RightCurly"/>
+
+    <!-- Checks for class design                         -->
+    <!-- See http://checkstyle.sf.net/config_design.html -->
+      <!-- Utility classes should not be instantiated; they must have a
+	   private constructor -->
+    <module name="HideUtilityClassConstructor"/>
+
+    <!-- Checks for common coding problems               -->
+    <!-- See http://checkstyle.sf.net/config_coding.html -->
+    <module name="EmptyStatement"/>
+      <!-- Require a hashCode override whenever equals is overridden -->
+    <module name="EqualsHashCode"/>
+      <!-- Disallow unnecessary instantiation of Boolean, String -->
+    <module name="IllegalInstantiation">
+      <property name="classes" value="java.lang.Boolean, java.lang.String"/>
+    </module>
+      <!-- Switch statements should be complete and with independent cases -->
+    <module name="FallThrough"/>
+    <!-- For hadoop_yarn profile, some YARN exceptions aren't loading in checkstyle -->
+    <module name="RedundantThrows">
+        <property name="suppressLoadErrors" value="true" />
+    </module>
+    <module name="SimplifyBooleanExpression"/>
+    <module name="SimplifyBooleanReturn"/>
+      <!-- Only one statement per line allowed -->
+    <module name="OneStatementPerLine"/>
+      <!-- Don't add parentheses when they are not required -->
+    <module name="UnnecessaryParentheses" />
+      <!-- Don't use == or != for string comparisons -->
+    <module name="StringLiteralEquality" />
+      <!-- Don't declare multiple variables in the same statement -->
+    <module name="MultipleVariableDeclarations" />
+      <!-- String literals more than one character long should not be
+	   repeated several times -->
+      <!-- the "unchecked" string is also accepted to allow
+	   @SuppressWarnings("unchecked") -->
+      <!-- Disabling for now until we have a better ignoreStringsRegexp -->
+      <!--
+    <module name="MultipleStringLiterals" >
+      <property name="ignoreStringsRegexp" value='^(("")|(".")|("unchecked"))$'/>
+    </module>
+      -->
+
+    <!-- Checks for imports                              -->
+    <!-- See http://checkstyle.sf.net/config_import.html -->
+    <module name="RedundantImport"/>
+      <!-- Imports should be explicit, and only from pure java packages.
+           But we allow imports that are only used in javadoc. -->
+    <module name="UnusedImports">
+      <property name="processJavadoc" value="true"/>
+    </module>
+    <module name="IllegalImport" />
+    <module name="AvoidStarImport" />
+    <module name="ImportOrder">
+      <property name="groups" value="java,javax,lib,shared,common,platform,org,com,io,net,scala,clover"/>
+      <property name="ordered" value="true"/>
+      <property name="separated" value="true"/>
+      <property name="option" value="bottom"/>
+    </module>
+
+    <!-- Checks for Javadoc comments (tree walker).       -->
+    <!-- See http://checkstyle.sf.net/config_javadoc.html -->
+      <!-- Javadoc must be formatted correctly -->
+    <module name="JavadocStyle">
+      <property name="checkFirstSentence" value="false"/>
+    </module>
+      <!-- Must have class / interface header comments -->
+    <module name="JavadocType"/>
+
+    <!-- Miscellaneous other checks (tree walker).     -->
+    <!-- See http://checkstyle.sf.net/config_misc.html -->
+      <!-- Java style arrays -->
+    <module name="ArrayTypeStyle"/>
+      <!-- Indentation -->
+    <module name="Indentation">
+      <property name="caseIndent" value="0"/>
+      <property name="basicOffset" value="4"/>
+      <property name="braceAdjustment" value="0"/>
+    </module>
+      <!-- Turn this on to see what needs to be done
+    <module name="TodoComment"/>
+       -->
+    <module name="UpperEll"/>
+
+    <module name="OperatorWrap"/>
+
+    <!-- Modifier Checks                                    -->
+    <!-- See http://checkstyle.sf.net/config_modifiers.html -->
+      <!-- Use a consistent way to put modifiers -->
+    <module name="ModifierOrder"/>
+    <module name="RedundantModifier"/>
+
+    <!-- Checks for Naming Conventions.                  -->
+    <!-- See http://checkstyle.sf.net/config_naming.html -->
+      <!-- Constant names should obey the traditional all uppercase
+	   naming convention -->
+    <module name="ConstantName"/>
+    <module name="LocalFinalVariableName">
+      <!-- Allow '_' except first. -->
+      <property name="format" value="^[a-z][a-zA-Z0-9_]*$"/>
+    </module>
+    <module name="LocalVariableName">
+      <!-- Allow '_' except first. -->
+      <property name="format" value="^[a-z][a-zA-Z0-9_]*$"/>
+    </module>
+    <module name="MemberName"/>
+    <module name="MethodName">
+      <!-- Allow trailing '_', signifying private methods.
+           Also allow '_' prefix, indicating disabled method or junit test. -->
+      <property name="format" value="^_?[a-z][a-zA-Z0-9]*_?$"/>
+    </module>
+    <module name="PackageName"/>
+    <module name="ParameterName">
+      <!-- Allow trailing '_'. -->
+      <property name="format" value="^[a-z][a-zA-Z0-9]*_?$"/>
+    </module>
+    <module name="StaticVariableName"/>
+    <module name="TypeName"/>
+
+    <!-- Checks for regexp expressions.                  -->
+    <!-- See http://checkstyle.sf.net/config_regexp.html -->
+
+    <!-- No trailing whitespace -->
+    <module name="Regexp">
+      <property name="format" value="[ \t]+$"/>
+      <property name="illegalPattern" value="true"/>
+      <property name="message" value="Trailing whitespace"/>
+    </module>
+
+    <!-- Authors should be in pom.xml file -->
+    <module name="Regexp">
+      <property name="format" value="@author"/>
+      <property name="illegalPattern" value="true"/>
+      <property name="message" value="developers names should be in pom file"/>
+    </module>
+
+    <!-- No multi-line C-style comments except at start of line. -->
+    <module name="Regexp">
+      <property name="format" value="^ +/\*[^*][^/]$"/>
+      <property name="illegalPattern" value="true"/>
+      <property name="message" value="C-style comment"/>
+    </module>
+
+    <module name="Regexp">
+      <property name="format" value="^ +/\*$"/>
+      <property name="illegalPattern" value="true"/>
+      <property name="message" value="C-style comment"/>
+    </module>
+
+    <!-- Checks for Size Violations.                    -->
+    <!-- See http://checkstyle.sf.net/config_sizes.html -->
+    <!-- Lines cannot exceed 100 chars, except if they are hyperlinks
+         or strings (possibly preceded by '+' and followed by say '),'. -->
+    <module name="LineLength">
+      <property name="max" value="100"/>
+      <property name="ignorePattern" value="^import|@see|@link|@BaseMessage|href|^[ +]*&quot;.*&quot;[);,]*$"/>
+    </module>
+      <!-- Over time, we will revise this down -->
+    <module name="MethodLength">
+      <property name="max" value="390"/>
+    </module>
+
+    <!-- Checks for whitespace (tree walker)                 -->
+    <!-- See http://checkstyle.sf.net/config_whitespace.html -->
+    <module name="EmptyForIteratorPad"/>
+      <!-- Spacing around methods -->
+    <module name="MethodParamPad">
+      <property name="option" value="nospace"/>
+      <property name="allowLineBreaks" value="true"/>
+     </module>
+      <!-- No whitespace before a token -->
+    <module name="NoWhitespaceBefore"/>
+      <!-- Whitespace after tokens is required -->
+    <module name="WhitespaceAfter"/>
+      <!-- Whitespace around tokens is required -->
+    <module name="WhitespaceAround">
+        <property name="allowEmptyConstructors" value="true"/>
+        <property name="allowEmptyMethods" value="true"/>
+    </module>
+    <module name="ParenPad"/>
+    <module name="TypecastParenPad"/>
+      <!-- No extra whitespace around types -->
+    <module name="GenericWhitespace"/>
+
+    <!-- Required for SuppressionCommentFilter below -->
+    <module name="FileContentsHolder"/>
+  </module>
+
+  <!-- Setup special comments to suppress specific checks from source files -->
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CHECKSTYLE\: stop ([\w\|]+)"/>
+    <property name="onCommentFormat"  value="CHECKSTYLE\: resume ([\w\|]+)"/>
+    <property name="checkFormat"      value="$1"/>
+  </module>
+
+  <!-- Turn off all checks between OFF and ON -->
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CHECKSTYLE\: OFF"/>
+    <property name="onCommentFormat"  value="CHECKSTYLE\: ON"/>
+  </module>
+
+  <!-- Turn off checks for the next N lines. -->
+  <module name="SuppressWithNearbyCommentFilter">
+    <property name="commentFormat" value="CHECKSTYLE: +IGNORE (\d+)"/>
+    <property name="influenceFormat" value="$1"/>
+  </module>
+</module>
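
The three filters at the end of the configuration enable source-level
suppression comments. A short illustration of how each would be used in a
Java file (class and method names are hypothetical):

    public class SuppressionExamples {
        // CHECKSTYLE: stop MethodName
        void oddly_named_method() { }   // MethodName check suspended here
        // CHECKSTYLE: resume MethodName

        // CHECKSTYLE: OFF
        void anotherExample() { }       // all checks suspended in this region
        // CHECKSTYLE: ON

        // CHECKSTYLE: IGNORE 1
        void one_more_example() { }     // checks ignored for the next 1 line
    }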

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/src/main/config/checkstyle/header.txt
----------------------------------------------------------------------
diff --git a/src/main/config/checkstyle/header.txt b/src/main/config/checkstyle/header.txt
new file mode 100644
index 0000000..2a42971
--- /dev/null
+++ b/src/main/config/checkstyle/header.txt
@@ -0,0 +1,16 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

http://git-wip-us.apache.org/repos/asf/phoenix/blob/978b2322/src/main/config/checkstyle/suppressions.xml
----------------------------------------------------------------------
diff --git a/src/main/config/checkstyle/suppressions.xml b/src/main/config/checkstyle/suppressions.xml
new file mode 100644
index 0000000..6662eca
--- /dev/null
+++ b/src/main/config/checkstyle/suppressions.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!DOCTYPE suppressions PUBLIC
+        "-//Puppy Crawl//DTD Suppressions 1.1//EN"
+        "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<suppressions>
+  <!-- Suppress checks on generated files. -->
+  <suppress checks="Header" files="CalciteResource.properties"/>
+  <suppress checks=".*" files="org-apache-calcite-jdbc.properties"/>
+  <suppress checks=".*" files="Foo.java"/>
+  <suppress checks=".*" files=".*/target/maven-archiver/pom.properties"/>
+  <suppress checks=".*" files="git.properties"/>
+  <suppress checks=".*" files="trace.properties"/>
+  <suppress checks=".*" files="release.properties"/>
+
+  <!-- This file triggers https://github.com/checkstyle/checkstyle/issues/92,
+       through no fault of its own. -->
+  <suppress checks=".*" files="SqlSimpleParser.java"/>
+
+  <!-- Don't complain about field names such as cust_id -->
+  <suppress checks=".*Name" files="JdbcExample.java"/>
+
+  <!-- Suppress JavadocPackage in the test packages -->
+  <suppress checks="JavadocPackage" files="src[/\\]test[/\\]java[/\\]"/>
+
+  <!-- And likewise in ubenchmark -->
+  <suppress checks="JavadocPackage" files="StatementTest.java"/>
+
+  <!-- Method names in Resource can have underscores -->
+  <suppress checks="MethodName" files="CalciteResource.java"/>
+</suppressions>


[17/50] [abbrv] phoenix git commit: PHOENIX-1963 - Irregular failures in ResultTest#testMonitorResult

Posted by ma...@apache.org.
PHOENIX-1963 - Irregular failures in ResultTest#testMonitorResult


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/289a875b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/289a875b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/289a875b

Branch: refs/heads/calcite
Commit: 289a875bd1cd76b6437ae1400d6c324bfe3e0754
Parents: a1032fb
Author: cmarcel <cm...@salesforce.com>
Authored: Thu May 14 15:56:46 2015 -0700
Committer: cmarcel <cm...@salesforce.com>
Committed: Thu May 14 15:57:00 2015 -0700

----------------------------------------------------------------------
 phoenix-pherf/cluster/pherf.sh                                  | 2 +-
 .../main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java  | 5 ++---
 .../src/test/java/org/apache/phoenix/pherf/ResultTest.java      | 4 ++--
 phoenix-pherf/standalone/pherf.sh                               | 2 +-
 4 files changed, 6 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/289a875b/phoenix-pherf/cluster/pherf.sh
----------------------------------------------------------------------
diff --git a/phoenix-pherf/cluster/pherf.sh b/phoenix-pherf/cluster/pherf.sh
index aeff856..8d58dfe 100755
--- a/phoenix-pherf/cluster/pherf.sh
+++ b/phoenix-pherf/cluster/pherf.sh
@@ -28,6 +28,6 @@ for f in $PHERF_HOME/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
-CMD="time $}JAVA_HOME}/bin/java ${REMOTE_DEBUG} -Dapp.home=${PHERF_HOME} ${ENV_PROPS} -Xms512m -Xmx3072m -cp ${CLASSPATH} org.apache.phoenix.pherf.Pherf ${@}"
+CMD="time ${JAVA_HOME}/bin/java ${REMOTE_DEBUG} -Dapp.home=${PHERF_HOME} ${ENV_PROPS} -Xms512m -Xmx3072m -cp ${CLASSPATH} org.apache.phoenix.pherf.Pherf ${@}"
 
 eval $CMD
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/289a875b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
index 9f46cf7..6f97551 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
@@ -106,8 +106,9 @@ public class MonitorManager implements Runnable {
                         rowCount.getAndIncrement();
                     }
                     try {
+                        resultHandler.flush();
                         Thread.sleep(getMonitorFrequency());
-                    } catch (InterruptedException e) {
+                    } catch (Exception e) {
                         Thread.currentThread().interrupt();
                         e.printStackTrace();
                     }
@@ -117,9 +118,7 @@ public class MonitorManager implements Runnable {
             try {
                 isRunning = false;
                 if (resultHandler != null) {
-                    resultHandler.flush();
                     resultHandler.close();
-
                 }
             } catch (Exception e) {
                 throw new FileLoaderRuntimeException("Could not close monitor results.", e);
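
The fix moves the flush into the polling loop so that partially collected
results become visible to readers while monitoring is still running, leaving
only close() for shutdown. A generic sketch of that pattern, assuming a
java.io.Flushable sink (the names here are illustrative, not Pherf's API):

    import java.io.Flushable;
    import java.io.IOException;

    final class MonitorLoopSketch {
        static void run(Flushable sink, long periodMillis) throws IOException {
            while (!Thread.currentThread().isInterrupted()) {
                // ... write one monitoring sample to the sink here ...
                sink.flush();                   // publish the sample right away
                try {
                    Thread.sleep(periodMillis); // wait for the next sample
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the flag
                }
            }
        }
    }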

http://git-wip-us.apache.org/repos/asf/phoenix/blob/289a875b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
index 0f4dfd1..c51f0dc 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
@@ -55,7 +55,7 @@ public class ResultTest {
             resultMonitorWriter.write(result);
             resultMonitorWriter.write(result);
             resultMonitorWriter.write(result);
-            resultMonitorWriter.flush();
+            resultMonitorWriter.close();
             List<Result> results = resultMonitorWriter.read();
             assertEquals("Results did not contain row.", results.size(), 3);
 
@@ -72,7 +72,7 @@ public class ResultTest {
         ExecutorService executorService = Executors.newFixedThreadPool(1);
         MonitorManager monitor = new MonitorManager(100);
         Future future = executorService.submit(monitor);
-        List<Result> records = null;
+        List<Result> records;
         final int TIMEOUT = 30;
 
         int ct = 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/289a875b/phoenix-pherf/standalone/pherf.sh
----------------------------------------------------------------------
diff --git a/phoenix-pherf/standalone/pherf.sh b/phoenix-pherf/standalone/pherf.sh
index e08035a..2b91d2c 100755
--- a/phoenix-pherf/standalone/pherf.sh
+++ b/phoenix-pherf/standalone/pherf.sh
@@ -24,5 +24,5 @@ for f in $PHERF_HOME/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
-CMD="time $}JAVA_HOME}/bin/java ${REMOTE_DEBUG} -Dapp.home=${PHERF_HOME} ${ENV_PROPS} -Xms512m -Xmx3072m -cp ${CLASSPATH} org.apache.phoenix.pherf.Pherf ${@}"
+CMD="time ${JAVA_HOME}/bin/java ${REMOTE_DEBUG} -Dapp.home=${PHERF_HOME} ${ENV_PROPS} -Xms512m -Xmx3072m -cp ${CLASSPATH} org.apache.phoenix.pherf.Pherf ${@}"
 eval $CMD
\ No newline at end of file


[37/50] [abbrv] phoenix git commit: PHOENIX-2007 java.sql.SQLException: Encountered exception in sub plan [0] execution (Alicia Ying Shu)

Posted by ma...@apache.org.
PHOENIX-2007 java.sql.SQLException: Encountered exception in sub plan [0] execution (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/82df3b97
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/82df3b97
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/82df3b97

Branch: refs/heads/calcite
Commit: 82df3b97a9ca88605f78b59e547819ff3bf9cd7a
Parents: 583b5b1
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Mon Jun 1 21:04:43 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Mon Jun 1 21:04:43 2015 +0530

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 54 ++++++++++++++++++++
 .../apache/phoenix/execute/HashJoinPlan.java    |  7 +--
 2 files changed, 58 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/82df3b97/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index a03204a..88e03ca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -3813,6 +3813,60 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
         }
     }
 
+    @Test
+    public void testSubqueryWithoutData() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+
+        try {
+            String GRAMMAR_TABLE = "CREATE TABLE IF NOT EXISTS GRAMMAR_TABLE (ID INTEGER PRIMARY KEY, " +
+                    "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," +
+                    "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + 
+                    "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + 
+                    "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + 
+                    "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + 
+                    "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))";
+
+            String LARGE_TABLE = "CREATE TABLE IF NOT EXISTS LARGE_TABLE (ID INTEGER PRIMARY KEY, " +
+                    "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," +
+                    "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + 
+                    "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + 
+                    "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + 
+                    "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + 
+                    "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))";
+
+            String SECONDARY_LARGE_TABLE = "CREATE TABLE IF NOT EXISTS SECONDARY_LARGE_TABLE (SEC_ID INTEGER PRIMARY KEY," +
+                    "sec_unsig_id UNSIGNED_INT, sec_big_id BIGINT, sec_usnig_long_id UNSIGNED_LONG, sec_tiny_id TINYINT," + 
+                    "sec_unsig_tiny_id UNSIGNED_TINYINT, sec_small_id SMALLINT, sec_unsig_small_id UNSIGNED_SMALLINT," + 
+                    "sec_float_id FLOAT, sec_unsig_float_id UNSIGNED_FLOAT, sec_double_id DOUBLE, sec_unsig_double_id UNSIGNED_DOUBLE," +
+                    "sec_decimal_id DECIMAL, sec_boolean_id BOOLEAN, sec_time_id TIME, sec_date_id DATE," +
+                    "sec_timestamp_id TIMESTAMP, sec_unsig_time_id TIME, sec_unsig_date_id DATE, sec_unsig_timestamp_id TIMESTAMP," +
+                    "sec_varchar_id VARCHAR (30), sec_char_id CHAR (30), sec_binary_id BINARY (100), sec_varbinary_id VARBINARY (100))";
+            createTestTable(getUrl(), GRAMMAR_TABLE);
+            createTestTable(getUrl(), LARGE_TABLE);
+            createTestTable(getUrl(), SECONDARY_LARGE_TABLE);
+
+            String query = "SELECT * FROM (SELECT ID, BIG_ID, DATE_ID FROM LARGE_TABLE AS A WHERE (A.ID % 5) = 0) AS A " +
+                    "INNER JOIN (SELECT SEC_ID, SEC_TINY_ID, SEC_UNSIG_FLOAT_ID FROM SECONDARY_LARGE_TABLE AS B WHERE (B.SEC_ID % 5) = 0) AS B " +     
+                    "ON A.ID=B.SEC_ID WHERE A.DATE_ID > ALL (SELECT SEC_DATE_ID FROM SECONDARY_LARGE_TABLE LIMIT 100) " +      
+                    "AND B.SEC_UNSIG_FLOAT_ID = ANY (SELECT sec_unsig_float_id FROM SECONDARY_LARGE_TABLE " +                                       
+                    "WHERE SEC_ID > ALL (SELECT MIN (ID) FROM GRAMMAR_TABLE WHERE UNSIG_ID IS NULL) AND " +
+                    "SEC_UNSIG_ID < ANY (SELECT DISTINCT(UNSIG_ID) FROM LARGE_TABLE WHERE UNSIG_ID<2500) LIMIT 1000) " +
+                    "AND A.ID < 10000";
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertFalse(rs.next());  
+        } finally {
+            Statement statement = conn.createStatement();
+            String query = "drop table GRAMMAR_TABLE";
+            statement.executeUpdate(query);
+            query = "drop table LARGE_TABLE";
+            statement.executeUpdate(query);
+            query = "drop table SECONDARY_LARGE_TABLE";
+            statement.executeUpdate(query);
+            conn.close();
+        }
+    }
 }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/82df3b97/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index aea075d..857a952 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -61,11 +61,12 @@ import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 
@@ -254,7 +255,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
             ImmutableBytesWritable ptr = new ImmutableBytesWritable();
             int columnCount = projector.getColumnCount();
             int rowCount = 0;
-            PDataType baseType = null;
+            PDataType baseType = PVarbinary.INSTANCE;
             for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
                 if (expectSingleRow && rowCount >= 1)
                     throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException();

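The PVarbinary.INSTANCE default above guards the empty-subquery case: when the
iterator yields no tuples, baseType is never refined from the projected values,
and a null type would trip up whatever serializes the (empty) key set afterwards.
A minimal sketch of the pattern, with all names hypothetical rather than
Phoenix API:

    import java.util.Iterator;
    import java.util.List;

    final class TypedCollector {
        enum ElemType { VARBINARY, DECIMAL } // illustrative stand-in for PDataType

        // Seed the element type with a safe default so an empty iterator still
        // yields a well-typed empty collection instead of a null type.
        static ElemType collect(Iterator<byte[]> rows, List<byte[]> out) {
            ElemType baseType = ElemType.VARBINARY; // was null before the fix
            while (rows.hasNext()) {
                out.add(rows.next());
                // the real code would refine baseType from each projected value here
            }
            return baseType;
        }
    }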

[10/50] [abbrv] phoenix git commit: PHOENIX-1956 SELECT (FALSE OR FALSE) RETURNS TRUE

Posted by ma...@apache.org.
PHOENIX-1956 SELECT (FALSE OR FALSE) RETURNS TRUE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c2fee39e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c2fee39e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c2fee39e

Branch: refs/heads/calcite
Commit: c2fee39efff87930ab3a00d4ed36ec32a493cf7d
Parents: 45a919f
Author: James Taylor <jt...@salesforce.com>
Authored: Fri May 8 13:13:44 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri May 8 13:14:24 2015 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/EvaluationOfORIT.java     | 11 +++++++++++
 .../org/apache/phoenix/compile/ExpressionCompiler.java   |  3 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2fee39e/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
index c9cc1e2..4355036 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/EvaluationOfORIT.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.end2end;
  
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
@@ -34,6 +35,16 @@ import org.junit.Test;
 
 public class EvaluationOfORIT extends BaseHBaseManagedTimeIT{
 		
+    @Test
+    public void testFalseOrFalse() throws SQLException {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        ResultSet rs = conn.createStatement().executeQuery("SELECT (FALSE OR FALSE) AS B FROM SYSTEM.CATALOG LIMIT 1");
+        assertTrue(rs.next());
+        assertFalse(rs.getBoolean(1));
+        conn.close();
+    }
+    
 	@Test
 	public void testPKOrNotPKInOREvaluation() throws SQLException {
 	    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2fee39e/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 92899a6..66c1b85 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -71,7 +71,6 @@ import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
 import org.apache.phoenix.expression.function.ArrayElemRefExpression;
 import org.apache.phoenix.expression.function.RoundDecimalExpression;
 import org.apache.phoenix.expression.function.RoundTimestampExpression;
-import org.apache.phoenix.expression.function.UDFExpression;
 import org.apache.phoenix.parse.AddParseNode;
 import org.apache.phoenix.parse.AndParseNode;
 import org.apache.phoenix.parse.ArithmeticParseNode;
@@ -261,7 +260,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
             determinism = determinism.combine(child.getDeterminism());
         }
         if (children.size() == 0) {
-            return LiteralExpression.newConstant(true, determinism);
+            return LiteralExpression.newConstant(false, determinism);
         }
         if (children.size() == 1) {
             return children.get(0);

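The compiler change is an identity-element fix: constant FALSE children are
evidently pruned from a disjunction during compilation, so when every child
folds away, the empty OR must evaluate to FALSE (the identity of OR), just as
an empty AND is TRUE. A minimal sketch of the fold, with Expr/Constant/Or as
illustrative names rather than Phoenix types:

    import java.util.List;

    final class BoolFolder {
        interface Expr {}
        static final class Constant implements Expr {
            final boolean value;
            Constant(boolean value) { this.value = value; }
        }
        static final class Or implements Expr {
            final List<Expr> children;
            Or(List<Expr> children) { this.children = children; }
        }

        // The empty disjunction is FALSE; returning TRUE here is exactly the
        // bug that made SELECT (FALSE OR FALSE) come back as TRUE once both
        // constant children had been folded away.
        static Expr foldOr(List<Expr> children) {
            if (children.isEmpty()) {
                return new Constant(false); // was 'true' before the fix
            }
            if (children.size() == 1) {
                return children.get(0);
            }
            return new Or(children);
        }
    }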

[49/50] [abbrv] phoenix git commit: PHOENIX-2032 psql.py is broken after PHOENIX-2013

Posted by ma...@apache.org.
PHOENIX-2032 psql.py is broken after PHOENIX-2013


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1934afb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1934afb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1934afb

Branch: refs/heads/calcite
Commit: d1934afbe6230e823b9009950fe721165e98cc7c
Parents: bfb0eee
Author: Nick Dimiduk <nd...@apache.org>
Authored: Fri Jun 12 10:23:05 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Fri Jun 12 12:12:09 2015 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml              |  4 ----
 phoenix-assembly/src/build/client.xml | 27 +++++++++++++++++++++++----
 2 files changed, 23 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1934afb/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 51ff74d..baf6738 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -152,10 +152,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-spark</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-server</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1934afb/phoenix-assembly/src/build/client.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/client.xml b/phoenix-assembly/src/build/client.xml
index 4bd4544..0e1e1f6 100644
--- a/phoenix-assembly/src/build/client.xml
+++ b/phoenix-assembly/src/build/client.xml
@@ -63,13 +63,32 @@
     </dependencySet>
 
     <!-- Make sure we get all the components, not just the minimal client ones (e.g.
-      phoenix-flume, phoenix-pig, etc) -->
+      phoenix-flume, phoenix-pig, etc). We should exclude phoenix-server and
+      phoenix-server-client in the future, see PHOENIX-2032, PHOENIX-2038 -->
     <dependencySet>
       <outputDirectory>/</outputDirectory>
       <unpack>true</unpack>
-      <includes>
-        <include>org.apache.phoenix:phoenix-*</include>
-      </includes>
+      <!-- multiple deps provide some variant of LICENSE files/directories. These
+           overwrite each other at best, at worst conflict on case-insensitive
+           filesystems like HFS+ and FAT32. Just exclude them -->
+      <unpackOptions>
+        <excludes>
+          <exclude>*license*</exclude>
+          <exclude>*LICENSE*</exclude>
+          <exclude>**/license/**</exclude>
+          <exclude>**/LICENSE/**</exclude>
+        </excludes>
+      </unpackOptions>
+      <!-- this is default, but make intentions clear -->
+      <useTransitiveDependencies>true</useTransitiveDependencies>
+      <!-- "When <include> subelements are present, they define a set of
+           artifact coordinates to include. If none is present, then <includes>
+           represents all valid values"
+           https://maven.apache.org/plugins/maven-assembly-plugin/assembly.html#class_dependencySet
+           This means bringing in all of the phoenix-assembly module's
+           dependencies, transitively.
+      -->
+      <includes />
     </dependencySet>
   </dependencySets>
 </assembly>


[04/50] [abbrv] phoenix git commit: PHOENIX-1882 Issue column family deletes instead of row deletes in PTableImpl

Posted by ma...@apache.org.
PHOENIX-1882 Issue column family deletes instead of row deletes in PTableImpl


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/efd7c9f7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/efd7c9f7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/efd7c9f7

Branch: refs/heads/calcite
Commit: efd7c9f735433c8512877ad3db194bb325bdde32
Parents: d2c1f2c
Author: Thomas <td...@salesforce.com>
Authored: Sun Apr 26 11:38:51 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Wed Apr 29 20:48:06 2015 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/MappingTableDataTypeIT.java | 67 ++++++++++++++------
 .../apache/phoenix/index/IndexMaintainer.java   |  7 --
 .../org/apache/phoenix/schema/PTableImpl.java   |  8 +--
 3 files changed, 53 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/efd7c9f7/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index 98e536e..9617e37 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -19,23 +19,31 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
+import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -52,18 +60,44 @@ public class MappingTableDataTypeIT extends BaseHBaseManagedTimeIT {
         try {
             // Create table then get the single region for our new table.
             HTableDescriptor descriptor = new HTableDescriptor(tableName);
-            HColumnDescriptor columnDescriptor =  new HColumnDescriptor(Bytes.toBytes("cf"));
-            descriptor.addFamily(columnDescriptor);
+            HColumnDescriptor columnDescriptor1 =  new HColumnDescriptor(Bytes.toBytes("cf1"));
+            HColumnDescriptor columnDescriptor2 =  new HColumnDescriptor(Bytes.toBytes("cf2"));
+            descriptor.addFamily(columnDescriptor1);
+            descriptor.addFamily(columnDescriptor2);
             admin.createTable(descriptor);
             HTableInterface t = conn.getQueryServices().getTable(Bytes.toBytes("MTEST"));
             insertData(tableName.getName(), admin, t);
             t.close();
-            try {
-                testCreateTableMismatchedType();
-                fail();
-            } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(),e.getErrorCode());
-            }
+            // create phoenix table that maps to existing HBase table
+            createPhoenixTable();
+            
+            String selectSql = "SELECT * FROM MTEST";
+            ResultSet rs = conn.createStatement().executeQuery(selectSql);
+            ResultSetMetaData rsMetaData = rs.getMetaData();
+            assertTrue("Expected single row", rs.next());
+            // verify values from cf2 are not returned
+            assertEquals("Number of columns", 2, rsMetaData.getColumnCount());
+            assertEquals("Column Value", "value1", rs.getString(2));
+            assertFalse("Expected single row ", rs.next());
+            
+            // delete the row
+            String deleteSql = "DELETE FROM MTEST WHERE id = 'row'";
+            conn.createStatement().executeUpdate(deleteSql);
+            conn.commit();
+            
+            // verify that no rows are returned when querying through phoenix
+            rs = conn.createStatement().executeQuery(selectSql);
+            assertFalse("Expected no row` ", rs.next());
+            
+            // verify that row with value for cf2 still exists when using hbase apis
+            Scan scan = new Scan();
+            ResultScanner results = t.getScanner(scan);
+            Result result = results.next();
+            assertNotNull("Expected single row", result);
+            List<KeyValue> kvs = result.getColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
+            assertEquals("Expected single value ", 1, kvs.size());
+            assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValue()));
+            assertNull("Expected single row", results.next());
         } finally {
             admin.close();
         }
@@ -72,26 +106,23 @@ public class MappingTableDataTypeIT extends BaseHBaseManagedTimeIT {
     private void insertData(final byte[] tableName, HBaseAdmin admin, HTableInterface t) throws IOException,
             InterruptedException {
         Put p = new Put(Bytes.toBytes("row"));
-        p.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
+        p.add(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
+        p.add(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), Bytes.toBytes("value2"));
         t.put(p);
         t.flushCommits();
         admin.flush(tableName);
     }
 
     /**
-     * Test create a table in Phoenix with mismatched data type UNSIGNED_LONG
+     * Create a table in Phoenix that only maps column family cf1
      */
-    private void testCreateTableMismatchedType() throws SQLException {
+    private void createPhoenixTable() throws SQLException {
         String ddl = "create table IF NOT EXISTS MTEST (" + " id varchar NOT NULL primary key,"
-                + " \"cf\".\"q1\" unsigned_long" + " ) ";
+                + " \"cf1\".\"q1\" varchar" + " ) ";
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(ddl);
         conn.commit();
-        String query = "select * from MTEST";
-        ResultSet rs = conn.createStatement().executeQuery(query);
-        rs.next();
-        rs.getLong(2);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/efd7c9f7/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 4565f39..0956753 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -806,15 +806,8 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         for (KeyValue kv : pendingUpdates) {
             if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()) {
                 nDeleteCF++;
-                boolean isEmptyCF = Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), 
-                  dataEmptyKeyValueCF, 0, dataEmptyKeyValueCF.length) == 0;
-                // This is what a delete looks like on the client side for immutable indexing...
-                if (isEmptyCF) {
-                    return true;
-                }
             }
         }
-        // This is what a delete looks like on the server side for mutable indexing...
         return nDeleteCF == this.nDataCFs;
     }
     

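With the client now issuing family-level deletes (see the PTableImpl hunk
below), the maintainer no longer special-cases the empty column family: a row
counts as deleted only when every data column family carries a DeleteFamily
marker. A compact, self-contained mirror of that check (class and method names
are illustrative, not the IndexMaintainer API):

    import java.util.List;

    import org.apache.hadoop.hbase.KeyValue;

    final class RowDeleteCheck {
        // Count DeleteFamily markers among the pending updates; the row is
        // considered deleted only when every data column family has one.
        static boolean isRowDeleted(List<KeyValue> pendingUpdates, int nDataCFs) {
            int nDeleteCF = 0;
            for (KeyValue kv : pendingUpdates) {
                if (kv.getTypeByte() == KeyValue.Type.DeleteFamily.getCode()) {
                    nDeleteCF++;
                }
            }
            return nDeleteCF == nDataCFs;
        }
    }
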
http://git-wip-us.apache.org/repos/asf/phoenix/blob/efd7c9f7/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 2d523ff..bf4420c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -718,10 +718,10 @@ public class PTableImpl implements PTable {
         @Override
         public void delete() {
             newMutations();
-            // FIXME: the version of the Delete constructor without the lock args was introduced
-            // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
-            // of the client.
-            Delete delete = new Delete(key,ts);
+            Delete delete = new Delete(key);
+            for (PColumnFamily colFamily : families) {
+            	delete.addFamily(colFamily.getName().getBytes(), ts);
+            }
             deleteRow = delete;
             // No need to write to the WAL for indexes
             if (PTableImpl.this.getType() == PTableType.INDEX) {

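The delete() rewrite above is the heart of the change: instead of one row-level
Delete, the client adds a family-level delete per column family Phoenix maps,
so cells in unmapped families (cf2 in the IT above) survive a Phoenix DELETE.
A standalone sketch of the same mutation shape against the plain HBase client
API, assuming the caller supplies the table handle and the mapped family names:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTableInterface;

    final class FamilyScopedDelete {
        // Delete only the mapped column families at timestamp ts; a family
        // the Phoenix table definition doesn't cover is left untouched,
        // unlike new Delete(row, ts), which removes the whole row.
        static void deleteMappedFamilies(HTableInterface table, byte[] row,
                List<byte[]> mappedFamilies, long ts) throws IOException {
            Delete delete = new Delete(row);
            for (byte[] family : mappedFamilies) {
                delete.deleteFamily(family, ts); // addFamily in HBase 1.x, as in the hunk above
            }
            table.delete(delete);
        }
    }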

[32/50] [abbrv] phoenix git commit: Revert "PHOENIX-2008 Integration tests are failing with HBase-1.1.0 because HBASE-13756(Rajeshbabu)"

Posted by ma...@apache.org.
Revert "PHOENIX-2008 Integration tests are failing with HBase-1.1.0 because HBASE-13756(Rajeshbabu)"

This reverts commit a28c1d3b2d31377f70e0a4c661c3c70d8bc99216.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/170e8cca
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/170e8cca
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/170e8cca

Branch: refs/heads/calcite
Commit: 170e8cca2f2e53002fa08ca16fa63d70248397ff
Parents: e493215
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Wed May 27 14:51:14 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Wed May 27 14:51:14 2015 +0530

----------------------------------------------------------------------
 phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/170e8cca/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 4aa28c4..54ae670 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -620,8 +620,6 @@ public abstract class BaseTest {
         }
         //no point doing sanity checks when running tests.
         conf.setBoolean("hbase.table.sanity.checks", false);
-        // Remove this configuration once hbase has HBASE-13756 fix.
-        conf.set("hbase.regionserver.msginterval", "300000");
         // set the server rpc controller and rpc scheduler factory, used to configure the cluster
         conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_SERVER_RPC_CONTROLLER_FACTORY);
         conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, DEFAULT_RPC_SCHEDULER_FACTORY);


[28/50] [abbrv] phoenix git commit: PHOENIX-1681 Use the new Region Interface (Andrew Purtell)

Posted by ma...@apache.org.
PHOENIX-1681 Use the new Region Interface (Andrew Purtell)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/edff624f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/edff624f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/edff624f

Branch: refs/heads/calcite
Commit: edff624f193324762fae04907c551e3d2fec93a3
Parents: 7bc9cce
Author: Enis Soztutar <en...@apache.org>
Authored: Thu May 21 23:22:54 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Fri May 22 00:16:31 2015 -0700

----------------------------------------------------------------------
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  4 +-
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |  4 +-
 .../IndexHalfStoreFileReaderGenerator.java      |  9 +-
 .../regionserver/IndexSplitTransaction.java     | 65 +++++---------
 .../hbase/regionserver/LocalIndexMerger.java    | 16 ++--
 .../hbase/regionserver/LocalIndexSplitter.java  | 11 +--
 .../coprocessor/BaseScannerRegionObserver.java  | 26 +++---
 .../GroupedAggregateRegionObserver.java         | 13 +--
 .../coprocessor/MetaDataEndpointImpl.java       | 94 ++++++++++----------
 .../phoenix/coprocessor/ScanRegionObserver.java | 17 ++--
 .../coprocessor/SequenceRegionObserver.java     | 16 ++--
 .../UngroupedAggregateRegionObserver.java       | 29 +++---
 .../hbase/index/covered/data/LocalTable.java    |  5 +-
 .../write/ParallelWriterIndexCommitter.java     |  8 +-
 .../recovery/PerRegionIndexWriteCache.java      | 10 +--
 .../recovery/StoreFailuresInCachePolicy.java    |  4 +-
 .../TrackingParallelWriterIndexCommitter.java   |  8 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |  4 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java | 14 ++-
 .../schema/stats/StatisticsCollector.java       | 14 +--
 .../phoenix/schema/stats/StatisticsScanner.java | 16 ++--
 .../phoenix/schema/stats/StatisticsWriter.java  | 16 ++--
 .../java/org/apache/phoenix/util/IndexUtil.java | 38 ++++----
 .../index/covered/TestLocalTableState.java      |  8 +-
 .../index/write/TestWALRecoveryCaching.java     | 17 ++--
 .../recovery/TestPerRegionIndexWriteCache.java  |  6 +-
 26 files changed, 230 insertions(+), 242 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 3b8ff29..611ba68 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -159,7 +159,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
   }
 
   /**
-   * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
+   * Test writing edits into a region, closing it, splitting logs, opening Region again. Verify
    * seqids.
    * @throws Exception on failure
    */
@@ -183,7 +183,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
     builder.build(htd);
 
     // create the region + its WAL
-    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
+    HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); // FIXME: Uses private type
     region0.close();
     region0.getWAL().close();
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
index d90733f..6b2309e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.util.EnvironmentEdge;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
@@ -312,7 +312,7 @@ public class EndToEndCoveredColumnsIndexBuilderIT {
     HTable primary = new HTable(UTIL.getConfiguration(), tableNameBytes);
 
     // overwrite the codec so we can verify the current state
-    HRegion region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
+    Region region = UTIL.getMiniHBaseCluster().getRegions(tableNameBytes).get(0);
     Indexer indexer =
         (Indexer) region.getCoprocessorHost().findCoprocessor(Indexer.class.getName());
     CoveredColumnsIndexBuilder builder =

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 1284dcf..94d5912 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -76,7 +76,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
             FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
             Reference r, Reader reader) throws IOException {
         TableName tableName = ctx.getEnvironment().getRegion().getTableDesc().getTableName();
-        HRegion region = ctx.getEnvironment().getRegion();
+        Region region = ctx.getEnvironment().getRegion();
         HRegionInfo childRegion = region.getRegionInfo();
         byte[] splitKey = null;
         if (reader == null && r != null) {
@@ -109,7 +109,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                     Pair<HRegionInfo, HRegionInfo> mergeRegions =
                             MetaTableAccessor.getRegionsFromMergeQualifier(ctx.getEnvironment()
                                     .getRegionServerServices().getConnection(),
-                                region.getRegionName());
+                                region.getRegionInfo().getRegionName());
                     if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
                     byte[] splitRow =
                             CellUtil.cloneRow(KeyValue.createKeyValueFromKey(r.getSplitKey()));
@@ -121,8 +121,9 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                         childRegion = mergeRegions.getSecond();
                         regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
                     }
-                    splitKey = KeyValue.createFirstOnRow(region.getStartKey().length == 0 ?
-                            new byte[region.getEndKey().length] : region.getStartKey()).getKey();
+                    splitKey = KeyValue.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ?
+                        new byte[region.getRegionInfo().getEndKey().length] :
+                            region.getRegionInfo().getStartKey()).getKey();
                 } else {
                     HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
                     regionStartKeyInHFile =

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
index 3057a14..71bc520 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexSplitTransaction.java
@@ -65,31 +65,8 @@ import org.apache.zookeeper.data.Stat;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-/**
- * Executes region split as a "transaction".  Call {@link #prepare()} to setup
- * the transaction, {@link #execute(Server, RegionServerServices)} to run the
- * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails.
- *
- * <p>Here is an example of how you would use this class:
- * <pre>
- *  SplitTransaction st = new SplitTransaction(this.conf, parent, midKey)
- *  if (!st.prepare()) return;
- *  try {
- *    st.execute(server, services);
- *  } catch (IOException ioe) {
- *    try {
- *      st.rollback(server, services);
- *      return;
- *    } catch (RuntimeException e) {
- *      myAbortable.abort("Failed split, abort");
- *    }
- *  }
- * </Pre>
- * <p>This class is not thread safe.  Caller needs ensure split is run by
- * one thread only.
- */
 @InterfaceAudience.Private
-public class IndexSplitTransaction extends SplitTransaction {
+public class IndexSplitTransaction extends SplitTransactionImpl { // FIXME: Extends private type
   private static final Log LOG = LogFactory.getLog(IndexSplitTransaction.class);
 
   /*
@@ -154,9 +131,9 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @param r Region to split
    * @param splitrow Row to split around
    */
-  public IndexSplitTransaction(final HRegion r, final byte [] splitrow) {
+  public IndexSplitTransaction(final Region r, final byte [] splitrow) {
     super(r , splitrow);
-    this.parent = r;
+    this.parent = (HRegion)r;
     this.splitrow = splitrow;
   }
 
@@ -217,7 +194,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @return Regions created
    */
   @Override
-  /* package */PairOfSameType<HRegion> createDaughters(final Server server,
+  /* package */PairOfSameType<Region> createDaughters(final Server server,
       final RegionServerServices services) throws IOException {
     LOG.info("Starting split of region " + this.parent);
     if ((server != null && server.isStopped()) ||
@@ -244,14 +221,14 @@ public class IndexSplitTransaction extends SplitTransaction {
         server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
           this.fileSplitTimeout);
 
-    PairOfSameType<HRegion> daughterRegions = stepsBeforePONR(server, services, testing);
+    PairOfSameType<Region> daughterRegions = stepsBeforePONR(server, services, testing);
 
     List<Mutation> metaEntries = new ArrayList<Mutation>();
     if (this.parent.getCoprocessorHost() != null) {
       if (this.parent.getCoprocessorHost().
           preSplitBeforePONR(this.splitrow, metaEntries)) {
         throw new IOException("Coprocessor bypassing region "
-            + this.parent.getRegionNameAsString() + " split.");
+            + this.parent.getRegionInfo().getRegionNameAsString() + " split.");
       }
       try {
         for (Mutation p : metaEntries) {
@@ -303,7 +280,7 @@ public class IndexSplitTransaction extends SplitTransaction {
   }
 
   @Override
-  public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
+  public PairOfSameType<Region> stepsBeforePONR(final Server server,
       final RegionServerServices services, boolean testing) throws IOException {
     // Set ephemeral SPLITTING znode up in zk.  Mocked servers sometimes don't
     // have zookeeper so don't do zk stuff if server or zookeeper is null
@@ -313,7 +290,7 @@ public class IndexSplitTransaction extends SplitTransaction {
           parent.getRegionInfo(), server.getServerName(), hri_a, hri_b);
       } catch (KeeperException e) {
         throw new IOException("Failed creating PENDING_SPLIT znode on " +
-          this.parent.getRegionNameAsString(), e);
+          this.parent.getRegionInfo().getRegionNameAsString(), e);
       }
     }
     this.journal.add(JournalEntry.SET_SPLITTING_IN_ZK);
@@ -367,12 +344,12 @@ public class IndexSplitTransaction extends SplitTransaction {
     // stuff in fs that needs cleanup -- a storefile or two.  That's why we
     // add entry to journal BEFORE rather than AFTER the change.
     this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
-    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
+    Region a = this.parent.createDaughterRegionFromSplits(this.hri_a);
 
     // Ditto
     this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
-    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
-    return new PairOfSameType<HRegion>(a, b);
+    Region b = this.parent.createDaughterRegionFromSplits(this.hri_b);
+    return new PairOfSameType<Region>(a, b);
   }
 
   /**
@@ -387,7 +364,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    */
   @Override
   /* package */void openDaughters(final Server server,
-      final RegionServerServices services, HRegion a, HRegion b)
+      final RegionServerServices services, Region a, Region b)
       throws IOException {
     boolean stopped = server != null && server.isStopped();
     boolean stopping = services != null && services.isStopping();
@@ -400,8 +377,8 @@ public class IndexSplitTransaction extends SplitTransaction {
           " because stopping=" + stopping + ", stopped=" + stopped);
     } else {
       // Open daughters in parallel.
-      DaughterOpener aOpener = new DaughterOpener(server, a);
-      DaughterOpener bOpener = new DaughterOpener(server, b);
+      DaughterOpener aOpener = new DaughterOpener(server, (HRegion)a);
+      DaughterOpener bOpener = new DaughterOpener(server, (HRegion)b);
       aOpener.start();
       bOpener.start();
       try {
@@ -444,7 +421,7 @@ public class IndexSplitTransaction extends SplitTransaction {
    *          Call {@link #rollback(Server, RegionServerServices)}
    */
   /* package */void transitionZKNode(final Server server,
-      final RegionServerServices services, HRegion a, HRegion b)
+      final RegionServerServices services, Region a, Region b)
       throws IOException {
     // Tell master about split by updating zk.  If we fail, abort.
     if (server != null && server.getZooKeeper() != null) {
@@ -556,7 +533,7 @@ public class IndexSplitTransaction extends SplitTransaction {
         Thread.currentThread().interrupt();
       }
       throw new IOException("Failed getting SPLITTING znode on "
-        + parent.getRegionNameAsString(), e);
+        + parent.getRegionInfo().getRegionNameAsString(), e);
     }
   }
 
@@ -572,10 +549,10 @@ public class IndexSplitTransaction extends SplitTransaction {
    * @see #rollback(Server, RegionServerServices)
    */
   @Override
-  public PairOfSameType<HRegion> execute(final Server server,
+  public PairOfSameType<Region> execute(final Server server,
       final RegionServerServices services)
   throws IOException {
-    PairOfSameType<HRegion> regions = createDaughters(server, services);
+    PairOfSameType<Region> regions = createDaughters(server, services);
     if (this.parent.getCoprocessorHost() != null) {
       this.parent.getCoprocessorHost().preSplitAfterPONR();
     }
@@ -583,8 +560,8 @@ public class IndexSplitTransaction extends SplitTransaction {
   }
 
   @Override
-  public PairOfSameType<HRegion> stepsAfterPONR(final Server server,
-      final RegionServerServices services, PairOfSameType<HRegion> regions)
+  public PairOfSameType<Region> stepsAfterPONR(final Server server,
+      final RegionServerServices services, PairOfSameType<Region> regions)
       throws IOException {
     openDaughters(server, services, regions.getFirst(), regions.getSecond());
     transitionZKNode(server, services, regions.getFirst(), regions.getSecond());
@@ -871,7 +848,7 @@ public class IndexSplitTransaction extends SplitTransaction {
           this.parent.initialize();
         } catch (IOException e) {
           LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " +
-            this.parent.getRegionNameAsString(), e);
+            this.parent.getRegionInfo().getRegionNameAsString(), e);
           throw new RuntimeException(e);
         }
         break;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
index add9b72..e361343 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -38,12 +38,12 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
 
     private static final Log LOG = LogFactory.getLog(LocalIndexMerger.class);
 
-    private RegionMergeTransaction rmt = null;
-    private HRegion mergedRegion = null;
+    private RegionMergeTransactionImpl rmt = null; // FIXME: Use of private type
+    private HRegion mergedRegion = null; // FIXME: Use of private type
 
     @Override
     public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-            HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException {
+            Region regionA, Region regionB, List<Mutation> metaEntries) throws IOException {
         HTableDescriptor tableDesc = regionA.getTableDesc();
         if (SchemaUtil.isSystemTable(tableDesc.getName())) {
             return;
@@ -56,14 +56,14 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
             TableName indexTable =
                     TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
             if (!MetaTableAccessor.tableExists(rs.getConnection(), indexTable)) return;
-            HRegion indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
+            Region indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
             if (indexRegionA == null) {
                 LOG.warn("Index region corresponindg to data region " + regionA
                         + " not in the same server. So skipping the merge.");
                 ctx.bypass();
                 return;
             }
-            HRegion indexRegionB = IndexUtil.getIndexRegion(regionB, ctx.getEnvironment());
+            Region indexRegionB = IndexUtil.getIndexRegion(regionB, ctx.getEnvironment());
             if (indexRegionB == null) {
                 LOG.warn("Index region corresponindg to region " + regionB
                         + " not in the same server. So skipping the merge.");
@@ -71,7 +71,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
                 return;
             }
             try {
-                rmt = new RegionMergeTransaction(indexRegionA, indexRegionB, false);
+                rmt = new RegionMergeTransactionImpl(indexRegionA, indexRegionB, false);
                 if (!rmt.prepare(rss)) {
                     LOG.error("Prepare for the index regions merge [" + indexRegionA + ","
                             + indexRegionB + "] failed. So returning null. ");
@@ -97,7 +97,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
 
     @Override
     public void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-            HRegion regionA, HRegion regionB, HRegion mergedRegion) throws IOException {
+            Region regionA, Region regionB, Region mergedRegion) throws IOException {
         if (rmt != null && this.mergedRegion != null) {
             RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
             HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
@@ -107,7 +107,7 @@ public class LocalIndexMerger extends BaseRegionServerObserver {
 
     @Override
     public void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-            HRegion regionA, HRegion regionB) throws IOException {
+            Region regionA, Region regionB) throws IOException {
         HRegionServer rs = (HRegionServer) ctx.getEnvironment().getRegionServerServices();
         try {
             if (rmt != null) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index 9af8251..7882e25 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -52,8 +52,8 @@ public class LocalIndexSplitter extends BaseRegionObserver {
 
     private static final Log LOG = LogFactory.getLog(LocalIndexSplitter.class);
 
-    private SplitTransaction st = null;
-    private PairOfSameType<HRegion> daughterRegions = null;
+    private SplitTransactionImpl st = null; // FIXME: Uses private type
+    private PairOfSameType<Region> daughterRegions = null;
     private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
     private static final int SPLIT_TXN_MINIMUM_SUPPORTED_VERSION = VersionUtil
             .encodeVersion("0.98.9");
@@ -74,17 +74,18 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                     TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
             if (!MetaTableAccessor.tableExists(rss.getConnection(), indexTable)) return;
 
-            HRegion indexRegion = IndexUtil.getIndexRegion(environment);
+            Region indexRegion = IndexUtil.getIndexRegion(environment);
             if (indexRegion == null) {
                 LOG.warn("Index region corresponindg to data region " + environment.getRegion()
                         + " not in the same server. So skipping the split.");
                 ctx.bypass();
                 return;
             }
+            // FIXME: Uses private type
             try {
                 int encodedVersion = VersionUtil.encodeVersion(environment.getHBaseVersion());
                 if(encodedVersion >= SPLIT_TXN_MINIMUM_SUPPORTED_VERSION) {
-                    st = new SplitTransaction(indexRegion, splitKey);
+                    st = new SplitTransactionImpl(indexRegion, splitKey);
                     st.useZKForAssignment =
                             environment.getConfiguration().getBoolean("hbase.assignment.usezk",
                                 true);
@@ -98,7 +99,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
                     ctx.bypass();
                     return;
                 }
-                indexRegion.forceSplit(splitKey);
+                ((HRegion)indexRegion).forceSplit(splitKey);
                 daughterRegions = st.stepsBeforePONR(rss, rss, false);
                 HRegionInfo copyOfParent = new HRegionInfo(indexRegion.getRegionInfo());
                 copyOfParent.setOffline(true);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index fc74968..d9e64e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -114,12 +114,12 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     }
 
 
-    private static void throwIfScanOutOfRegion(Scan scan, HRegion region) throws DoNotRetryIOException {
+    private static void throwIfScanOutOfRegion(Scan scan, Region region) throws DoNotRetryIOException {
         boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
         byte[] lowerInclusiveScanKey = scan.getStartRow();
         byte[] upperExclusiveScanKey = scan.getStopRow();
-        byte[] lowerInclusiveRegionKey = region.getStartKey();
-        byte[] upperExclusiveRegionKey = region.getEndKey();
+        byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey();
+        byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey();
         boolean isStaleRegionBoundaries;
         if (isLocalIndex) {
             byte[] expectedUpperRegionKey = scan.getAttribute(EXPECTED_UPPER_REGION_KEY);
@@ -201,7 +201,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 }
             }
         } catch (Throwable t) {
-            ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+            ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
             return null; // impossible
         }
     }
@@ -221,7 +221,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     protected RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
             final RegionScanner s, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
-            final HRegion dataRegion, final IndexMaintainer indexMaintainer,
+            final Region dataRegion, final IndexMaintainer indexMaintainer,
             final byte[][] viewConstants, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
         return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
@@ -246,7 +246,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
             final RegionScanner s, final Set<KeyValueColumnExpression> arrayKVRefs,
             final Expression[] arrayFuncRefs, final int offset, final Scan scan,
             final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
-            final HRegion dataRegion, final IndexMaintainer indexMaintainer,
+            final Region dataRegion, final IndexMaintainer indexMaintainer,
             final byte[][] viewConstants, final KeyValueSchema kvSchema,
             final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
             final ImmutableBytesWritable ptr) {
@@ -257,7 +257,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 try {
                     return s.next(results);
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                     return false; // impossible
                 }
             }
@@ -267,7 +267,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 try {
                     return s.next(result, scannerContext);
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                     return false; // impossible
                 }
             }
@@ -319,7 +319,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                     // There is a scanattribute set to retrieve the specific array element
                     return next;
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                     return false; // impossible
                 }
             }
@@ -346,10 +346,10 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                 }
                 // There is a scanattribute set to retrieve the specific array element
                 return next;
-            } catch (Throwable t) {
-                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(), t);
+              } catch (Throwable t) {
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t);
                 return false; // impossible
-            }
+              }
             }
 
             private void replaceArrayIndexElement(final Set<KeyValueColumnExpression> arrayKVRefs,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 19a1663..d613688 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
@@ -112,8 +112,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
              * For local indexes, we need to set an offset on row key expressions to skip
              * the region start key.
              */
-            HRegion region = c.getEnvironment().getRegion();
-            offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
+            Region region = c.getEnvironment().getRegion();
+            offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
+                region.getRegionInfo().getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
 
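The first hunk also documents how the local-index row-key offset is derived
under the new interface: the region start key length, except for a table's
first region, whose start key is empty, in which case the end key length is
used. A small helper capturing that rule (the helper itself is illustrative,
not Phoenix API):

    import org.apache.hadoop.hbase.regionserver.Region;

    final class LocalIndexOffsets {
        // Length of the region-boundary prefix that local index row keys
        // carry; mirrors the offset computation in the hunk above.
        static int rowKeyOffset(Region region) {
            byte[] start = region.getRegionInfo().getStartKey();
            byte[] end = region.getRegionInfo().getEndKey();
            return start.length != 0 ? start.length : end.length;
        }
    }
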
@@ -128,7 +129,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
         byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
         List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
         TupleProjector tupleProjector = null;
-        HRegion dataRegion = null;
+        Region dataRegion = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
 
@@ -415,7 +416,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
             }
 
-            HRegion region = c.getEnvironment().getRegion();
+            Region region = c.getEnvironment().getRegion();
             region.startRegionOperation();
             try {
                 synchronized (scanner) {
@@ -495,7 +496,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 // If we're calculating no aggregate functions, we can exit at the
                 // start of a new row. Otherwise, we have to wait until an agg
                 int countOffset = rowAggregators.length == 0 ? 1 : 0;
-                HRegion region = c.getEnvironment().getRegion();
+                Region region = c.getEnvironment().getRegion();
                 region.startRegionOperation();
                 try {
                     synchronized (scanner) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index e613007..39a4956 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -99,8 +99,8 @@ import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegion.RowLock;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.RowLock;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -404,7 +404,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         try {
             // TODO: check that key is within region.getStartKey() and region.getEndKey()
             // and return special code to force client to lookup region from meta.
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -434,7 +434,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
+    private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, Region region,
             long clientTimeStamp) throws IOException, SQLException {
         Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
         RegionScanner scanner = region.getScanner(scan);
@@ -464,7 +464,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private List<PFunction> buildFunctions(List<byte[]> keys, HRegion region,
+    private List<PFunction> buildFunctions(List<byte[]> keys, Region region,
             long clientTimeStamp) throws IOException, SQLException {
         List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
         for (byte[] key : keys) {
@@ -914,7 +914,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 className.getString(), jarPath == null ? null : jarPath.getString(), timeStamp);
     }
     
-    private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
+    private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region region,
         long clientTimeStamp) throws IOException {
         if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
             return null;
@@ -942,7 +942,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
 
     
-    private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
+    private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, Region region,
         long clientTimeStamp) throws IOException {
         if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
             return null;
@@ -989,7 +989,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key,
         ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp)
         throws IOException, SQLException {
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
         PTable table = (PTable)metaDataCache.getIfPresent(cacheKey);
         // We always cache the latest version - fault in if not in cache
@@ -1008,7 +1008,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private PFunction loadFunction(RegionCoprocessorEnvironment env, byte[] key,
             ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp)
             throws IOException, SQLException {
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
             PFunction function = (PFunction)metaDataCache.getIfPresent(cacheKey);
             // We always cache the latest version - fault in if not in cache
@@ -1051,7 +1051,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         schemaName, tableName);
             byte[] parentKey = parentTableName == null ? null : lockKey;
 
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(lockKey, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -1115,7 +1115,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         return;
                     }
                 }
-                // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
+                // TODO: Switch this to Region#batchMutate when we want to support indexes on the
                 // system
                 // table. Basically, we get all the locks that we don't already hold for all the
                 // tableMetadata rows. This ensures we don't have deadlock situations (ensuring
@@ -1125,7 +1125,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // on the system table. This is an issue because of the way we manage batch mutation
                 // in the
                 // Indexer.
-                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                    HConstants.NO_NONCE);
 
                 // Invalidate the cache - the next getTable call will add it
                 // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
@@ -1151,9 +1152,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
 
 
-    private static void acquireLock(HRegion region, byte[] key, List<RowLock> locks)
+    private static void acquireLock(Region region, byte[] key, List<RowLock> locks)
         throws IOException {
-        RowLock rowLock = region.getRowLock(key);
+        RowLock rowLock = region.getRowLock(key, true);
         if (rowLock == null) {
             throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
         }
@@ -1167,7 +1168,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
      * TODO: should we pass a timestamp here?
      */
     @SuppressWarnings("deprecation")
-    private TableViewFinderResult findChildViews(HRegion region, byte[] tenantId, PTable table) throws IOException {
+    private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table) throws IOException {
         byte[] schemaName = table.getSchemaName().getBytes();
         byte[] tableName = table.getTableName().getBytes();
         boolean isMultiTenant = table.isMultiTenant();
@@ -1256,7 +1257,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes,
                         schemaName, tableName);
 
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -1280,7 +1281,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 }
                 Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                 // Commit the list of deletion.
-                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                    HConstants.NO_NONCE);
                 long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 for (ImmutableBytesPtr ckey : invalidateList) {
                     metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
@@ -1309,7 +1311,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
         long clientTimeStamp = MetaDataUtil.getClientTimeStamp(rowsToDelete);
 
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
 
         Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
@@ -1435,7 +1437,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 
     private static interface ColumnMutator {
       MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
-          List<Mutation> tableMetadata, HRegion region,
+          List<Mutation> tableMetadata, Region region,
           List<ImmutableBytesPtr> invalidateList, List<RowLock> locks) throws IOException,
           SQLException;
     }
@@ -1449,7 +1451,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
         try {
             byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 return result;
@@ -1535,7 +1537,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     return result;
                 }
 
-                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                    HConstants.NO_NONCE);
                 // Invalidate from cache
                 for (ImmutableBytesPtr invalidateKey : invalidateList) {
                     metaDataCache.invalidate(invalidateKey);
@@ -1563,7 +1566,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
                 @Override
                 public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
-                        List<Mutation> tableMetaData, HRegion region,
+                        List<Mutation> tableMetaData, Region region,
                         List<ImmutableBytesPtr> invalidateList, List<RowLock> locks) {
                     byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                     byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
@@ -1647,14 +1650,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         // get the co-processor environment
         // TODO: check that key is within region.getStartKey() and region.getEndKey()
         // and return special code to force client to lookup region from meta.
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         /*
          * Lock directly on key, though it may be an index table. This will just prevent a table
          * from getting rebuilt too often.
          */
         final boolean wasLocked = (rowLock != null);
         if (!wasLocked) {
-            rowLock = region.getRowLock(key);
+            rowLock = region.getRowLock(key, true);
             if (rowLock == null) {
                 throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
             }
@@ -1689,7 +1692,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     private List<PFunction> doGetFunctions(List<byte[]> keys, long clientTimeStamp) throws IOException, SQLException {
         Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache =
                 GlobalCache.getInstance(this.env).getMetaDataCache();
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         Collections.sort(keys, new Comparator<byte[]>() {
             @Override
             public int compare(byte[] o1, byte[] o2) {
@@ -1700,11 +1703,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
          * Lock directly on key, though it may be an index table. This will just prevent a table
          * from getting rebuilt too often.
          */
-        List<RowLock> rowLocks = new ArrayList<HRegion.RowLock>(keys.size());;
+        List<RowLock> rowLocks = new ArrayList<Region.RowLock>(keys.size());;
         try {
-            rowLocks = new ArrayList<HRegion.RowLock>(keys.size());
+            rowLocks = new ArrayList<Region.RowLock>(keys.size());
             for (int i = 0; i < keys.size(); i++) {
-                HRegion.RowLock rowLock = region.getRowLock(keys.get(i));
+                Region.RowLock rowLock = region.getRowLock(keys.get(i), true);
                 if (rowLock == null) {
                     throw new IOException("Failed to acquire lock on "
                             + Bytes.toStringBinary(keys.get(i)));
@@ -1737,7 +1740,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             if(functionsAvailable.size() == numFunctions) return functionsAvailable;
             return null;
         } finally {
-            for (HRegion.RowLock lock : rowLocks) {
+            for (Region.RowLock lock : rowLocks) {
                 lock.release();
             }
             rowLocks.clear();
@@ -1756,7 +1759,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
                 @Override
                 public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
-                        List<Mutation> tableMetaData, HRegion region,
+                        List<Mutation> tableMetaData, Region region,
                         List<ImmutableBytesPtr> invalidateList, List<RowLock> locks)
                         throws IOException, SQLException {
                     byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
@@ -1904,7 +1907,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
             tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
             byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkTableKeyInRegion(key, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -1928,7 +1931,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             }
             PIndexState newState =
                     PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
-            RowLock rowLock = region.getRowLock(key);
+            RowLock rowLock = region.getRowLock(key, true);
             if (rowLock == null) {
                 throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
             }
@@ -2019,7 +2022,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
                         tableMetadata.add(p);
                     }
-                    region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
+                    region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+                        HConstants.NO_NONCE);
                     // Invalidate from cache
                     Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                     metaDataCache.invalidate(cacheKey);
@@ -2044,9 +2048,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
 
-    private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, HRegion region) {
-        byte[] startKey = region.getStartKey();
-        byte[] endKey = region.getEndKey();
+    private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) {
+        byte[] startKey = region.getRegionInfo().getStartKey();
+        byte[] endKey = region.getRegionInfo().getEndKey();
         if (Bytes.compareTo(startKey, key) <= 0
                 && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key,
                     endKey) < 0)) {
@@ -2056,9 +2060,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 EnvironmentEdgeManager.currentTimeMillis(), null);
     }
 
-    private static MetaDataMutationResult checkFunctionKeyInRegion(byte[] key, HRegion region) {
-        byte[] startKey = region.getStartKey();
-        byte[] endKey = region.getEndKey();
+    private static MetaDataMutationResult checkFunctionKeyInRegion(byte[] key, Region region) {
+        byte[] startKey = region.getRegionInfo().getStartKey();
+        byte[] endKey = region.getRegionInfo().getEndKey();
         if (Bytes.compareTo(startKey, key) <= 0
                 && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key,
                     endKey) < 0)) {
@@ -2135,7 +2139,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         byte[] tenantId = request.getTenantId().toByteArray();
         List<String> functionNames = new ArrayList<>(request.getFunctionNamesCount());
         try {
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             List<ByteString> functionNamesList = request.getFunctionNamesList();
             List<Long> functionTimestampsList = request.getFunctionTimestampsList();
             List<byte[]> keys = new ArrayList<byte[]>(request.getFunctionNamesCount());
@@ -2189,7 +2193,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
             functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
             byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName);
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -2225,7 +2229,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 }
                 // Don't store function info for temporary functions.
                 if(!temporaryFunction) {
-                    region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]> emptySet());
+                    region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
                 }
 
                 // Invalidate the cache - the next getFunction call will add it
@@ -2259,7 +2263,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
             functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
             byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName);
-            HRegion region = env.getRegion();
+            Region region = env.getRegion();
             MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region);
             if (result != null) {
                 done.run(MetaDataMutationResult.toProto(result));
@@ -2278,7 +2282,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     done.run(MetaDataMutationResult.toProto(result));
                     return;
                 }
-                region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]> emptySet());
+                region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
 
                 Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                 long currentTime = MetaDataUtil.getClientTimeStamp(functionMetaData);
@@ -2322,7 +2326,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                             EnvironmentEdgeManager.currentTimeMillis(), null);
                 }
                 invalidateList.add(new FunctionBytesPtr(keys.get(0)));
-                HRegion region = env.getRegion();
+                Region region = env.getRegion();
                 Scan scan = MetaDataUtil.newTableRowsScan(keys.get(0), MIN_TABLE_TIMESTAMP, clientTimeStamp);
                 List<Cell> results = Lists.newArrayList();
                 try (RegionScanner scanner = region.getScanner(scan);) {

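Beyond the type swap, two API shifts recur throughout this endpoint: Region#getRowLock takes an extra boolean compared to the old HRegion call, and mutateRowsWithLocks gains two nonce parameters, supplied as HConstants.NO_NONCE since the metadata code does not use nonces. A condensed sketch of the acquire-mutate-release pattern the file repeats (method name hypothetical):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.Region.RowLock;
    import org.apache.hadoop.hbase.util.Bytes;

    static void lockAndMutate(Region region, List<byte[]> keys, List<Mutation> metadata)
            throws IOException {
        List<RowLock> locks = new ArrayList<RowLock>(keys.size());
        try {
            for (byte[] key : keys) {
                // The boolean argument is new relative to HRegion#getRowLock.
                RowLock lock = region.getRowLock(key, true);
                if (lock == null) {
                    throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
                }
                locks.add(lock);
            }
            // Locks are already held explicitly, so no extra rows-to-lock are passed.
            region.mutateRowsWithLocks(metadata, Collections.<byte[]> emptySet(),
                HConstants.NO_NONCE, HConstants.NO_NONCE);
        } finally {
            for (RowLock lock : locks) {
                lock.release();
            }
        }
    }
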
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 77e124d..54c688a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.cache.GlobalCache;
@@ -176,8 +176,9 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
              * For local indexes, we need to set an offset on row key expressions to skip
              * the region start key.
              */
-            HRegion region = c.getEnvironment().getRegion();
-            offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
+            Region region = c.getEnvironment().getRegion();
+            offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
+                region.getRegionInfo().getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
 
@@ -187,7 +188,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         Expression[] arrayFuncRefs = deserializeArrayPostionalExpressionInfoFromScan(
                 scan, innerScanner, arrayKVRefs);
         TupleProjector tupleProjector = null;
-        HRegion dataRegion = null;
+        Region dataRegion = null;
         IndexMaintainer indexMaintainer = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
@@ -231,7 +232,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), tenantId);
         long estSize = iterator.getEstimatedByteSize();
         final MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
-        final HRegion region = c.getEnvironment().getRegion();
+        final Region region = c.getEnvironment().getRegion();
         region.startRegionOperation();
         try {
             // Once we return from the first call to next, we've run through and cached
@@ -241,7 +242,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
             long actualSize = iterator.getByteSize();
             chunk.resize(actualSize);
         } catch (Throwable t) {
-            ServerUtil.throwIOException(region.getRegionNameAsString(), t);
+            ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
             return null;
         } finally {
             region.closeRegionOperation();
@@ -273,7 +274,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                     tuple = iterator.next();
                     return !isFilterDone();
                 } catch (Throwable t) {
-                    ServerUtil.throwIOException(region.getRegionNameAsString(), t);
+                    ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
                     return false;
                 }
             }
@@ -288,7 +289,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                             iterator.close();
                         }
                     } catch (SQLException e) {
-                        ServerUtil.throwIOException(region.getRegionNameAsString(), e);
+                        ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), e);
                     } finally {
                         chunk.close();
                     }

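The error-reporting changes in this observer are mechanical (the region name now comes via getRegionInfo()), but the pattern around them deserves a note: all scanner work is bracketed by startRegionOperation/closeRegionOperation so the region cannot be closed or split mid-read, and failures are rethrown tagged with the region name. A minimal sketch, assuming a caller-supplied body of work:

    // Sketch only; the real call sites inline their work rather than taking
    // a Runnable.
    static void withRegionOperation(Region region, Runnable work) throws IOException {
        region.startRegionOperation();
        try {
            work.run();
        } catch (Throwable t) {
            // Wraps and rethrows as an IOException carrying the region name.
            ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
        } finally {
            region.closeRegionOperation();
        }
    }
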
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
index 7953933..9b5f040 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegion.RowLock;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Region.RowLock;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -88,9 +88,9 @@ public class SequenceRegionObserver extends BaseRegionObserver {
                         QueryConstants.EMPTY_COLUMN_BYTES, timestamp, errorCodeBuf)));
     }
     
-    private static void acquireLock(HRegion region, byte[] key, List<RowLock> locks)
+    private static void acquireLock(Region region, byte[] key, List<RowLock> locks)
         throws IOException {
-        RowLock rowLock = region.getRowLock(key);
+        RowLock rowLock = region.getRowLock(key, true);
         if (rowLock == null) {
             throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
         }
@@ -114,7 +114,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
         // We need to set this to prevent region.increment from being called
         e.bypass();
         e.complete();
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         byte[] row = increment.getRow();
         List<RowLock> locks = Lists.newArrayList();
         TimeRange tr = increment.getTimeRange();
@@ -251,7 +251,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
                 }
                 // update the KeyValues on the server
                 Mutation[] mutations = new Mutation[]{put};
-                region.batchMutate(mutations);
+                region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
                 // return a Result with the updated KeyValues
                 return Result.create(cells);
             } finally {
@@ -345,7 +345,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
         // We need to set this to prevent region.append from being called
         e.bypass();
         e.complete();
-        HRegion region = env.getRegion();
+        Region region = env.getRegion();
         byte[] row = append.getRow();
         List<RowLock> locks = Lists.newArrayList();
         region.startRegionOperation();
@@ -400,7 +400,7 @@ public class SequenceRegionObserver extends BaseRegionObserver {
                     }
                 }
                 Mutation[] mutations = new Mutation[]{m};
-                region.batchMutate(mutations);
+                region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
                 long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
                 // Return result with single KeyValue. The only piece of information
                 // the client cares about is the timestamp, which is the timestamp of

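The sequence observer keeps its overall shape; only the region type, the lock call, and batchMutate change. Stripped of the sequence arithmetic, the server-side update now runs roughly as follows (fragment; names as in the hunks above, details elided):

    Region region = env.getRegion();
    List<RowLock> locks = Lists.newArrayList();
    region.startRegionOperation();
    try {
        // acquireLock wraps region.getRowLock(row, true) and fails fast on null.
        acquireLock(region, row, locks);
        // ... read the current sequence cells and compute the updated `put` ...
        region.batchMutate(new Mutation[] { put }, HConstants.NO_NONCE, HConstants.NO_NONCE);
    } finally {
        for (RowLock lock : locks) lock.release();
        region.closeRegionOperation();
    }
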
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2d6d98a..d5cc486 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -125,7 +125,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         this.kvBuilder = GenericKeyValueBuilder.INSTANCE;
     }
 
-    private static void commitBatch(HRegion region, List<Mutation> mutations, byte[] indexUUID) throws IOException {
+    private static void commitBatch(Region region, List<Mutation> mutations, byte[] indexUUID) throws IOException {
       if (indexUUID != null) {
           for (Mutation m : mutations) {
               m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
@@ -133,7 +133,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
       }
       Mutation[] mutationArray = new Mutation[mutations.size()];
       // TODO: should we use the one that is all or none?
-      region.batchMutate(mutations.toArray(mutationArray));
+      region.batchMutate(mutations.toArray(mutationArray), HConstants.NO_NONCE, HConstants.NO_NONCE);
     }
 
     public static void serializeIntoScan(Scan scan) {
@@ -158,7 +158,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     @Override
     protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException {
         int offset = 0;
-        HRegion region = c.getEnvironment().getRegion();
+        Region region = c.getEnvironment().getRegion();
         long ts = scan.getTimeRange().getMax();
         StatisticsCollector stats = null;
         if(ScanUtil.isAnalyzeTable(scan)) {
@@ -172,7 +172,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
              * For local indexes, we need to set an offset on row key expressions to skip
              * the region start key.
              */
-            offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
+            offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length :
+                region.getRegionInfo().getEndKey().length;
             ScanUtil.setRowKeyOffset(scan, offset);
         }
 
@@ -212,7 +213,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
             ptr = new ImmutableBytesWritable();
         }
         TupleProjector tupleProjector = null;
-        HRegion dataRegion = null;
+        Region dataRegion = null;
         byte[][] viewConstants = null;
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
         boolean localIndexScan = ScanUtil.isLocalIndex(scan);
@@ -279,8 +280,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                                                 results);
                                         Put put = maintainer.buildUpdateMutation(kvBuilder,
                                             valueGetter, ptr, ts,
-                                            c.getEnvironment().getRegion().getStartKey(),
-                                            c.getEnvironment().getRegion().getEndKey());
+                                            c.getEnvironment().getRegion().getRegionInfo().getStartKey(),
+                                            c.getEnvironment().getRegion().getRegionInfo().getEndKey());
                                         indexMutations.add(put);
                                     }
                                 }
@@ -391,7 +392,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                         } catch (ConstraintViolationException e) {
                             // Log and ignore in count
                             logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
-                                region.getRegionNameAsString() + " with values " +
+                                region.getRegionInfo().getRegionNameAsString() + " with values " +
                                 SchemaUtil.toString(values),
                                 ScanUtil.getCustomAnnotations(scan)), e);
                             continue;
@@ -479,9 +480,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
     }
 
     private void commitIndexMutations(final ObserverContext<RegionCoprocessorEnvironment> c,
-            HRegion region, List<Mutation> indexMutations) throws IOException {
+            Region region, List<Mutation> indexMutations) throws IOException {
         // Get indexRegion corresponding to data region
-        HRegion indexRegion = IndexUtil.getIndexRegion(c.getEnvironment());
+        Region indexRegion = IndexUtil.getIndexRegion(c.getEnvironment());
         if (indexRegion != null) {
             commitBatch(indexRegion, indexMutations, null);
         } else {
@@ -493,7 +494,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                 table = c.getEnvironment().getTable(indexTable);
                 table.batch(indexMutations);
             } catch (InterruptedException ie) {
-                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(),
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(),
                     ie);
             } finally {
                 if (table != null) table.close();
@@ -534,9 +535,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
 
 
     @Override
-    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion l, HRegion r)
+    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, Region l, Region r)
             throws IOException {
-        HRegion region = e.getEnvironment().getRegion();
+        Region region = e.getEnvironment().getRegion();
         TableName table = region.getRegionInfo().getTable();
         StatisticsCollector stats = null;
         try {

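One substantive detail in this file beyond the type swap: when building local index updates during a rebuild, the mutation is bounded by the data region's start and end keys, both now read through RegionInfo. In isolation:

    // The maintainer uses the region boundary to prefix the index row key,
    // keeping the local index row co-located with its data region.
    byte[] regionStartKey = c.getEnvironment().getRegion().getRegionInfo().getStartKey();
    byte[] regionEndKey = c.getEnvironment().getRegion().getRegionInfo().getEndKey();
    Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts,
        regionStartKey, regionEndKey);
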
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
index 71cc1d6..549fe8c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalTable.java
@@ -24,12 +24,11 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -60,7 +59,7 @@ public class LocalTable implements LocalHBaseState {
     Scan s = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columns));
     s.setStartRow(row);
     s.setStopRow(row);
-    HRegion region = this.env.getRegion();
+    Region region = this.env.getRegion();
     RegionScanner scanner = region.getScanner(s);
     List<Cell> kvs = new ArrayList<Cell>(1);
     boolean more = scanner.next(kvs);

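For context, the method touched here reads the current state of a single row from the local region, with no RPC, so the index codec can diff old and new values. A self-contained sketch under the same assumptions (hypothetical method name; classes as imported in the hunk):

    static Result currentRowState(RegionCoprocessorEnvironment env, Scan s, byte[] row)
            throws IOException {
        // Start row equal to stop row is treated by HBase as a single-row get.
        s.setStartRow(row);
        s.setStopRow(row);
        RegionScanner scanner = env.getRegion().getScanner(s);
        try {
            List<Cell> kvs = new ArrayList<Cell>(1);
            scanner.next(kvs);
            return Result.create(kvs);
        } finally {
            scanner.close();
        }
    }
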
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index f72dec0..56bf637 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -21,11 +21,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure;
 import org.apache.phoenix.hbase.index.parallel.QuickFailingTaskRunner;
@@ -150,10 +151,11 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
                         // as well.
                         try {
                             if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
-                                HRegion indexRegion = IndexUtil.getIndexRegion(env);
+                                Region indexRegion = IndexUtil.getIndexRegion(env);
                                 if (indexRegion != null) {
                                     throwFailureIfDone();
-                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
+                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
+                                        HConstants.NO_NONCE, HConstants.NO_NONCE);
                                     return null;
                                 }
                             }

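The logic touched here (mirrored in TrackingParallelWriterIndexCommitter below) short-circuits writes to a local index: when the index region is co-located on this region server, mutations are applied in-process through the Region API instead of over an HTable; the new HConstants import exists only for the nonce arguments. In outline:

    if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
        Region indexRegion = IndexUtil.getIndexRegion(env);
        if (indexRegion != null) {
            // Direct, in-process write; NO_NONCE opts out of nonce bookkeeping.
            indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
                HConstants.NO_NONCE, HConstants.NO_NONCE);
            return null;
        }
    }
    // Otherwise fall through to the HTableInterface-based write path.
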
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
index 4d5f667..26da2d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java
@@ -22,7 +22,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
@@ -32,8 +32,8 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 
 public class PerRegionIndexWriteCache {
 
-  private Map<HRegion, Multimap<HTableInterfaceReference, Mutation>> cache =
-      new HashMap<HRegion, Multimap<HTableInterfaceReference, Mutation>>();
+  private Map<Region, Multimap<HTableInterfaceReference, Mutation>> cache =
+      new HashMap<Region, Multimap<HTableInterfaceReference, Mutation>>();
 
 
   /**
@@ -43,7 +43,7 @@ public class PerRegionIndexWriteCache {
    * @return Get the edits for the given region. Returns <tt>null</tt> if there are no pending edits
    *         for the region
    */
-  public Multimap<HTableInterfaceReference, Mutation> getEdits(HRegion region) {
+  public Multimap<HTableInterfaceReference, Mutation> getEdits(Region region) {
     return cache.remove(region);
   }
 
@@ -52,7 +52,7 @@ public class PerRegionIndexWriteCache {
    * @param table
    * @param collection
    */
-  public void addEdits(HRegion region, HTableInterfaceReference table,
+  public void addEdits(Region region, HTableInterfaceReference table,
       Collection<Mutation> collection) {
     Multimap<HTableInterfaceReference, Mutation> edits = cache.get(region);
     if (edits == null) {

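A note on this cache now that its key is the Region interface: neither HRegion nor Region appears to override equals/hashCode, so the map keys on object identity, which is what recovery wants here, since a region server holds one live region object per hosted region. Usage, roughly:

    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    cache.addEdits(region, tableRef, failedMutations);
    // getEdits both returns and removes the buffered edits for the region.
    Multimap<HTableInterfaceReference, Mutation> pending = cache.getEdits(region);
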
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
index f36affb..189f970 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java
@@ -23,7 +23,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 
 import com.google.common.collect.Multimap;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
@@ -41,7 +41,7 @@ public class StoreFailuresInCachePolicy implements IndexFailurePolicy {
 
   private KillServerOnFailurePolicy delegate;
   private PerRegionIndexWriteCache cache;
-  private HRegion region;
+  private Region region;
 
   /**
    * @param failedIndexEdits cache to update when we find a failure

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index 9171b53..b1b2656 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -23,11 +23,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.phoenix.hbase.index.CapturingAbortable;
 import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
 import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
@@ -154,10 +155,11 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                             // index is pretty hacky. If we're going to keep this, we should revisit that
                             // as well.
                             if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
-                                HRegion indexRegion = IndexUtil.getIndexRegion(env);
+                                Region indexRegion = IndexUtil.getIndexRegion(env);
                                 if (indexRegion != null) {
                                     throwFailureIfDone();
-                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
+                                    indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
+                                        HConstants.NO_NONCE, HConstants.NO_NONCE);
                                     return Boolean.TRUE;
                                 }
                             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index b5e6a63..7a45e21 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
@@ -73,7 +73,7 @@ public class PhoenixIndexBuilder extends CoveredColumnsIndexBuilder {
         ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN);
         scanRanges.initializeScan(scan);
         scan.setFilter(scanRanges.getSkipScanFilter());
-        HRegion region = this.env.getRegion();
+        Region region = this.env.getRegion();
         RegionScanner scanner = region.getScanner(scan);
         // Run through the scanner using internal nextRaw method
         region.startRegionOperation();

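The scan opened here is a server-side multi-key point lookup: the keys are packed into ScanRanges so the skip-scan filter jumps between them, and the scan then runs against the local region rather than going back out through a client. Condensed from the code above:

    ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA,
        Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN);
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    Region region = this.env.getRegion();
    RegionScanner scanner = region.getScanner(scan);
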
http://git-wip-us.apache.org/repos/asf/phoenix/blob/edff624f/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index 99e26d1..222aefb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -24,9 +24,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Pair;
@@ -166,14 +164,14 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
             Mutation mutation = null;
             if (upsert) {
                 mutation =
-                        maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, state
-                                .getCurrentTimestamp(), env.getRegion().getStartKey(), env
-                                .getRegion().getEndKey());
+                        maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, state.getCurrentTimestamp(),
+                            env.getRegion().getRegionInfo().getStartKey(),
+                            env.getRegion().getRegionInfo().getEndKey());
             } else {
                 mutation =
-                        maintainer.buildDeleteMutation(kvBuilder, valueGetter, ptr, state
-                                .getPendingUpdate(), state.getCurrentTimestamp(), env.getRegion()
-                                .getStartKey(), env.getRegion().getEndKey());
+                        maintainer.buildDeleteMutation(kvBuilder, valueGetter, ptr, state.getPendingUpdate(),
+                            state.getCurrentTimestamp(), env.getRegion().getRegionInfo().getStartKey(),
+                            env.getRegion().getRegionInfo().getEndKey());
             }
             indexUpdate.setUpdate(mutation);
             if (scanner != null) {


[41/50] [abbrv] phoenix git commit: PHOENIX-777 - Support null value for fixed length ARRAY - Addendum (Ram)

Posted by ma...@apache.org.
PHOENIX-777 - Support null value for fixed length ARRAY - Addendum (Ram)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6c3d50ac
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6c3d50ac
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6c3d50ac

Branch: refs/heads/calcite
Commit: 6c3d50ac198dd9159fb50cfe898734db99257c10
Parents: 7f6bf10
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Jun 2 14:32:02 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Jun 2 14:32:02 2015 +0530

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/schema/types/PTimestamp.java   | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c3d50ac/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index d396adc..16b110e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
 
 public class PTimestamp extends PDataType<Timestamp> {
@@ -47,6 +48,10 @@ public class PTimestamp extends PDataType<Timestamp> {
   @Override
   public int toBytes(Object object, byte[] bytes, int offset) {
     if (object == null) {
+      // Create the byte[] of size MAX_TIMESTAMP_BYTES
+      if(bytes.length != getByteSize()) {
+          bytes = Bytes.padTail(bytes, (getByteSize() - bytes.length));
+      }
       PDate.INSTANCE.getCodec().encodeLong(0l, bytes, offset);
       Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, 0);
       return getByteSize();

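The addendum guards the null branch against a destination buffer shorter than the fixed TIMESTAMP width (an 8-byte date plus a 4-byte nanos int). Worth noting: Bytes.padTail returns a zero-padded copy, so the enlarged array is local to this method and the zero encoding is written into that copy. Rendered standalone (hypothetical method name):

    static int encodeNullTimestamp(byte[] bytes, int offset, int byteSize) {
        if (bytes.length != byteSize) {
            bytes = Bytes.padTail(bytes, byteSize - bytes.length); // padded copy
        }
        PDate.INSTANCE.getCodec().encodeLong(0L, bytes, offset);   // 8-byte date
        Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, 0);        // 4-byte nanos
        return byteSize;
    }
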

[03/50] [abbrv] phoenix git commit: PHOENIX-1930 [BW COMPAT] Queries hangs with client on Phoenix 4.3.0 and server on 4.x-HBase-0.98

Posted by ma...@apache.org.
PHOENIX-1930 [BW COMPAT] Queries hangs with client on Phoenix 4.3.0 and server on 4.x-HBase-0.98


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d2c1f2c0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d2c1f2c0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d2c1f2c0

Branch: refs/heads/calcite
Commit: d2c1f2c0a6a0994da296b20158697fa725f1b4a7
Parents: 902cf0d
Author: Thomas <td...@salesforce.com>
Authored: Wed Apr 29 15:44:13 2015 -0700
Committer: Thomas <td...@salesforce.com>
Committed: Wed Apr 29 16:36:04 2015 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/phoenix/expression/ExpressionType.java  | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2c1f2c0/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 843a768..71f0521 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -214,6 +214,10 @@ public enum ExpressionType {
     ByteBasedRegexpReplaceFunction(ByteBasedRegexpReplaceFunction.class),
     ByteBasedRegexpSubstrFunction(ByteBasedRegexpSubstrFunction.class),
     ByteBasedRegexpSplitFunction(ByteBasedRegexpSplitFunction.class),
+    LikeExpression(LikeExpression.class),
+    RegexpReplaceFunction(RegexpReplaceFunction.class),
+    RegexpSubstrFunction(RegexpSubstrFunction.class),
+    RegexpSplitFunction(RegexpSplitFunction.class),
     SignFunction(SignFunction.class),
     YearFunction(YearFunction.class),
     MonthFunction(MonthFunction.class),

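The placement of the four re-added entries is the entire fix: Phoenix writes an expression's ExpressionType ordinal onto the wire, so enum constants may only be appended, never inserted, renamed in place, or removed, or else a 4.3.0 client and a newer server will decode each other's ordinals as the wrong types. A sketch of the failure mode, assuming ordinal-based serialization:

    // Writing side: the constant's position in the enum is the wire format.
    WritableUtils.writeVInt(output, expressionType.ordinal());

    // Reading side: the position maps straight back to a constant, so any
    // reordering on one end silently decodes to a different expression type.
    ExpressionType type = ExpressionType.values()[WritableUtils.readVInt(input)];
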

[35/50] [abbrv] phoenix git commit: PHOENIX-2010 Properly validate number of arguments passed to the functions in FunctionParseNode#validate(Rajeshbabu)

Posted by ma...@apache.org.
PHOENIX-2010 Properly validate number of arguments passed to the functions in FunctionParseNode#validate(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b7f13824
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b7f13824
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b7f13824

Branch: refs/heads/calcite
Commit: b7f138246328ea80ce53fb73539a1e48413a32d2
Parents: 08fc27d
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Sun May 31 07:40:16 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Sun May 31 07:40:16 2015 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/UserDefinedFunctionsIT.java       | 14 ++++++++++++++
 .../org/apache/phoenix/parse/FunctionParseNode.java   |  4 ++++
 .../main/java/org/apache/phoenix/parse/PFunction.java |  4 +---
 3 files changed, 19 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f13824/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index 7dbde3c..868e19d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -442,6 +442,20 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
         rs = stmt.executeQuery("select k from t9 where mysum9(k)=11");
         assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
+        try {
+            rs = stmt.executeQuery("select k from t9 where mysum9(k,10,'x')=11");
+            fail("FunctionNotFoundException should be thrown");
+        } catch(FunctionNotFoundException e) { // expected: too many arguments
+        } catch(Exception e) {
+            fail("FunctionNotFoundException should be thrown");
+        }
+        try {
+            rs = stmt.executeQuery("select mysum9() from t9");
+            fail("FunctionNotFoundException should be thrown");
+        } catch(FunctionNotFoundException e) { // expected: too few arguments
+        } catch(Exception e) {
+            fail("FunctionNotFoundException should be thrown");
+        }
         stmt.execute("drop function mysum9");
         try {
             rs = stmt.executeQuery("select k from t9 where mysum9(k)=11");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f13824/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
index d1001ee..be52d89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
@@ -41,6 +41,7 @@ import org.apache.phoenix.expression.function.FunctionExpression;
 import org.apache.phoenix.expression.function.UDFExpression;
 import org.apache.phoenix.parse.PFunction.FunctionArgument;
 import org.apache.phoenix.schema.ArgumentTypeMismatchException;
+import org.apache.phoenix.schema.FunctionNotFoundException;
 import org.apache.phoenix.schema.ValueRangeExcpetion;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDataTypeFactory;
@@ -133,6 +134,9 @@ public class FunctionParseNode extends CompoundParseNode {
     public List<Expression> validate(List<Expression> children, StatementContext context) throws SQLException {
         BuiltInFunctionInfo info = this.getInfo();
         BuiltInFunctionArgInfo[] args = info.getArgs();
+        if (args.length < children.size() || info.getRequiredArgCount() > children.size()) {
+            throw new FunctionNotFoundException(this.name);
+        }
         if (args.length > children.size()) {
             List<Expression> moreChildren = new ArrayList<Expression>(children);
             for (int i = children.size(); i < info.getArgs().length; i++) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b7f13824/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
index f4bac35..8a95ae7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
@@ -95,9 +95,7 @@ public class PFunction implements PMetaDataEntity {
     }
 
     public PFunction(PFunction function) {
-        this(function.getTenantId(), function.getFunctionName(), function.getFunctionArguments(),
-                function.getReturnType(), function.getClassName(), function.getJarPath(), function
-                        .getTimeStamp());
+        this(function, function.isTemporaryFunction());
     }
 
     public String getFunctionName() {

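The guard added to validate rejects a call before the default-argument padding
that follows it runs: more arguments than declared, or fewer than the required
minimum, now surfaces as a FunctionNotFoundException up front. A simplified
sketch of the rule, with plain SQLException standing in for
FunctionNotFoundException:

    import java.sql.SQLException;

    public class AritySketch {
        // Simplified version of the guard: more arguments than declared, or
        // fewer than the required minimum, means no matching signature.
        static void checkArity(String name, int declared, int required, int supplied)
                throws SQLException {
            if (declared < supplied || required > supplied) {
                throw new SQLException("function not found: " + name + "/" + supplied);
            }
            // past this point supplied <= declared, so missing optional
            // arguments can be padded with their declared defaults
        }
    }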

[11/50] [abbrv] phoenix git commit: PHOENIX-1958 Minimize memory allocation on new connection

Posted by ma...@apache.org.
PHOENIX-1958 Minimize memory allocation on new connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd81738b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd81738b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd81738b

Branch: refs/heads/calcite
Commit: cd81738b1fbcb5cf19123b2dca8da31f602b9c64
Parents: c2fee39
Author: James Taylor <jt...@salesforce.com>
Authored: Sat May 9 10:18:57 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat May 9 10:18:57 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/jdbc/PhoenixConnection.java  | 41 +++++++++++---------
 .../org/apache/phoenix/util/ReadOnlyProps.java  | 32 +++++++++++++++
 2 files changed, 54 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd81738b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index c22a7fa..dad60c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -48,7 +48,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.concurrent.Executor;
 
@@ -56,6 +55,8 @@ import javax.annotation.Nullable;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.TraceScope;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -95,15 +96,12 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.TraceScope;
 
 import com.google.common.base.Objects;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 
 /**
@@ -185,21 +183,9 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
             if (tenantId != null) {
                 services = services.getChildQueryServices(tenantId.getBytesPtr());
             }
-            // TODO: we could avoid creating another wrapper if the only property
-            // specified was for the tenant ID
-            Map<String, String> existingProps = services.getProps().asMap();
-            final Map<String, String> tmpAugmentedProps = Maps.newHashMapWithExpectedSize(existingProps.size() + info.size());
-            tmpAugmentedProps.putAll(existingProps);
-            boolean needsDelegate = false;
-            for (Entry<Object, Object> entry : this.info.entrySet()) {
-                String key = entry.getKey().toString();
-                String value = entry.getValue().toString();
-                String oldValue = tmpAugmentedProps.put(key, value);
-                needsDelegate |= !Objects.equal(oldValue, value);
-            }
-            this.services = !needsDelegate ? services : new DelegateConnectionQueryServices(services) {
-                final ReadOnlyProps augmentedProps = new ReadOnlyProps(tmpAugmentedProps);
-    
+            ReadOnlyProps currentProps = services.getProps();
+            final ReadOnlyProps augmentedProps = currentProps.addAll(filterKnownNonProperties(this.info));
+            this.services = augmentedProps == currentProps ? services : new DelegateConnectionQueryServices(services) {
                 @Override
                 public ReadOnlyProps getProps() {
                     return augmentedProps;
@@ -261,6 +247,23 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
         this.customTracingAnnotations = getImmutableCustomTracingAnnotations();
     }
     
+    private static Properties filterKnownNonProperties(Properties info) {
+        Properties prunedProperties = info;
+        if (info.containsKey(PhoenixRuntime.CURRENT_SCN_ATTRIB)) {
+            if (prunedProperties == info) {
+                prunedProperties = PropertiesUtil.deepCopy(info);
+            }
+            prunedProperties.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB);
+        }
+        if (info.containsKey(PhoenixRuntime.TENANT_ID_ATTRIB)) {
+            if (prunedProperties == info) {
+                prunedProperties = PropertiesUtil.deepCopy(info);
+            }
+            prunedProperties.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
+        }
+        return prunedProperties;
+    }
+
     private ImmutableMap<String, String> getImmutableCustomTracingAnnotations() {
     	Builder<String, String> result = ImmutableMap.builder();
     	result.putAll(JDBCUtil.getAnnotations(url, info));

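filterKnownNonProperties copies the incoming Properties at most once, and only
when a known non-property key is actually present; otherwise the original
object comes back and nothing is allocated. A standalone sketch of that
lazy-copy idiom, with clone() standing in for PropertiesUtil.deepCopy and a
hypothetical helper name:

    import java.util.Properties;

    public class LazyCopySketch {
        // Hypothetical helper mirroring the idiom: remove the given keys,
        // cloning at most once so the common no-op case allocates nothing.
        static Properties without(Properties info, String... keys) {
            Properties pruned = info;
            for (String key : keys) {
                if (pruned.containsKey(key)) {
                    if (pruned == info) {            // first removal: copy once
                        pruned = (Properties) info.clone();
                    }
                    pruned.remove(key);
                }
            }
            return pruned;                           // == info when nothing matched
        }
    }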
http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd81738b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index 68b0879..47137ef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -23,10 +23,13 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Objects;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
 
 /**
  * 
@@ -61,6 +64,17 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
         this.props = ImmutableMap.copyOf(props);
     }
 
+    private ReadOnlyProps(ReadOnlyProps defaultProps, Properties overrides) {
+        Map<String,String> combinedProps = Maps.newHashMapWithExpectedSize(defaultProps.props.size() + overrides.size());
+        combinedProps.putAll(defaultProps.props);
+        for (Entry<Object, Object> entry : overrides.entrySet()) {
+            String key = entry.getKey().toString();
+            String value = entry.getValue().toString();
+            combinedProps.put(key, value);
+        }
+        this.props = ImmutableMap.copyOf(combinedProps);
+    }
+
     private static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");
     private static int MAX_SUBST = 20;
 
@@ -269,4 +283,22 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
     public boolean isEmpty() {
         return props.isEmpty();
     }
+
+    /**
+     * Constructs a new map only if applying the overrides would change it.
+     * @param overrides Properties that override the current properties.
+     * @return a new ReadOnlyProps if applying the overrides modifies the
+     * current underlying Map; otherwise returns this.
+     */
+    public ReadOnlyProps addAll(Properties overrides) {
+        for (Entry<Object, Object> entry : overrides.entrySet()) {
+            String key = entry.getKey().toString();
+            String value = entry.getValue().toString();
+            String oldValue = props.get(key);
+            if (!Objects.equal(oldValue, value)) {
+                return new ReadOnlyProps(this, overrides);
+            }
+        }
+        return this;
+    }
 }

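Because addAll returns this when every override already matches, callers can
use a plain identity comparison to decide whether a wrapper is needed at all,
as the PhoenixConnection change above does. A hypothetical caller, with a
purely illustrative property key:

    import java.util.Properties;
    import org.apache.phoenix.util.ReadOnlyProps;

    public class AddAllSketch {
        // Hypothetical caller; the key is illustrative only.
        static ReadOnlyProps augment(ReadOnlyProps current) {
            Properties overrides = new Properties();
            overrides.setProperty("phoenix.query.timeoutMs", "60000");
            ReadOnlyProps augmented = current.addAll(overrides);
            if (augmented == current) {
                // the override already matched an existing value: no new map
                // was built, so the caller can skip the delegate wrapper
            }
            return augmented;
        }
    }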

[47/50] [abbrv] phoenix git commit: PHOENIX-2033 PQS log environment details on launch

Posted by ma...@apache.org.
PHOENIX-2033 PQS log environment details on launch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/67fea166
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/67fea166
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/67fea166

Branch: refs/heads/calcite
Commit: 67fea1665d6ebb963e0dff335f513e4f61cbd22c
Parents: 31a1ca6
Author: Nick Dimiduk <nd...@apache.org>
Authored: Tue Jun 9 17:12:21 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Fri Jun 12 09:33:56 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/queryserver/server/Main.java | 69 ++++++++++++++++++++
 1 file changed, 69 insertions(+)
----------------------------------------------------------------------

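The diff below introduces two configuration keys for controlling this logging.
A hypothetical operator-side sketch of setting them (key strings taken from
the constants added below; assuming a standard HBase Configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PqsLoggingConf {
        static Configuration pqsConf() {
            Configuration conf = HBaseConfiguration.create();
            // suppress env-var logging entirely:
            conf.setBoolean("phoenix.queryserver.envvars.logging.disabled", true);
            // or keep it, but also skip anything mentioning these substrings:
            conf.setStrings("phoenix.queryserver.envvars.logging.skipwords",
                "token", "apikey");
            return conf;
        }
    }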

http://git-wip-us.apache.org/repos/asf/phoenix/blob/67fea166/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
----------------------------------------------------------------------
diff --git a/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java b/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
index 55febc5..9f9bfc7 100644
--- a/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
+++ b/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
@@ -34,7 +34,12 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
 import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
@@ -50,6 +55,11 @@ public final class Main extends Configured implements Tool, Runnable {
       "phoenix.queryserver.http.port";
   public static final int DEFAULT_HTTP_PORT = 8765;
 
+  public static final String QUERY_SERVER_ENV_LOGGING_KEY =
+          "phoenix.queryserver.envvars.logging.disabled";
+  public static final String QUERY_SERVER_ENV_LOGGING_SKIPWORDS_KEY =
+          "phoenix.queryserver.envvars.logging.skipwords";
+
   public static final String KEYTAB_FILENAME_KEY = "phoenix.queryserver.keytab.file";
   public static final String KERBEROS_PRINCIPAL_KEY = "phoenix.queryserver.kerberos.principal";
   public static final String DNS_NAMESERVER_KEY = "phoenix.queryserver.dns.nameserver";
@@ -58,12 +68,70 @@ public final class Main extends Configured implements Tool, Runnable {
 
   protected static final Log LOG = LogFactory.getLog(Main.class);
 
+  @SuppressWarnings("serial")
+  private static final Set<String> DEFAULT_SKIP_WORDS = new HashSet<String>() {
+    {
+      add("secret");
+      add("passwd");
+      add("password");
+      add("credential");
+    }
+  };
+
   private final String[] argv;
   private final CountDownLatch runningLatch = new CountDownLatch(1);
   private HttpServer server = null;
   private int retCode = 0;
   private Throwable t = null;
 
+  /**
+   * Log information about the currently running JVM.
+   */
+  public static void logJVMInfo() {
+    // Print out vm stats before starting up.
+    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
+    if (runtime != null) {
+      LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
+              runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
+      LOG.info("vmInputArguments=" + runtime.getInputArguments());
+    }
+  }
+
+  /**
+   * Logs information about the currently running JVM process, including
+   * its environment variables. Logging of env vars can be disabled by
+   * setting {@code "phoenix.queryserver.envvars.logging.disabled"} to {@code "true"}.
+   * <p>If enabled, you can also exclude environment variables containing
+   * certain substrings by setting {@code "phoenix.queryserver.envvars.logging.skipwords"}
+   * to a comma separated list of such substrings.
+   */
+  public static void logProcessInfo(Configuration conf) {
+    // log environment variables unless asked not to
+    if (conf == null || !conf.getBoolean(QUERY_SERVER_ENV_LOGGING_KEY, false)) {
+      Set<String> skipWords = new HashSet<String>(DEFAULT_SKIP_WORDS);
+      if (conf != null) {
+        String[] confSkipWords = conf.getStrings(QUERY_SERVER_ENV_LOGGING_SKIPWORDS_KEY);
+        if (confSkipWords != null) {
+          skipWords.addAll(Arrays.asList(confSkipWords));
+        }
+      }
+
+      nextEnv:
+      for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
+        String key = entry.getKey().toLowerCase();
+        String value = entry.getValue().toLowerCase();
+        // exclude variables which may contain skip words
+        for(String skipWord : skipWords) {
+          if (key.contains(skipWord) || value.contains(skipWord))
+            continue nextEnv;
+        }
+        LOG.info("env:"+entry);
+      }
+    }
+    // and JVM info
+    logJVMInfo();
+  }
+
   /** Constructor for use from {@link org.apache.hadoop.util.ToolRunner}. */
   public Main() {
     this(null, null);
@@ -112,6 +180,7 @@ public final class Main extends Configured implements Tool, Runnable {
 
   @Override
   public int run(String[] args) throws Exception {
+    logProcessInfo(getConf());
     try {
       // handle secure cluster credentials
       if ("kerberos".equalsIgnoreCase(getConf().get(HBASE_SECURITY_CONF_KEY))) {